xref: /freebsd/sys/opencrypto/ktls_ocf.c (revision ac099daf6742ead81ea7ea86351a8ef4e783041b)
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Netflix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <opencrypto/cryptodev.h>

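/*
 * Per-session state for the OCF KTLS backend.  "sid" is the OCF
 * session for the cipher or AEAD transform.  For MAC-then-encrypt
 * suites (AES-CBC), "mac_sid" and "mac_len" describe a separate
 * digest session used to compute the HMAC before encryption.  The
 * lock protects the completion handshake with asynchronous drivers
 * and, for TLS 1.0, the chained CBC IV kept in "iv".
 */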
struct ocf_session {
	crypto_session_t sid;
	crypto_session_t mac_sid;
	int mac_len;
	struct mtx lock;
	bool implicit_iv;

	/* Only used for TLS 1.0 with the implicit IV. */
#ifdef INVARIANTS
	bool in_progress;
	uint64_t next_seqno;
#endif
	char iv[AES_BLOCK_LEN];
};

struct ocf_operation {
	struct ocf_session *os;
	bool done;
};

static MALLOC_DEFINE(M_KTLS_OCF, "ktls_ocf", "OCF KTLS");

SYSCTL_DECL(_kern_ipc_tls);
SYSCTL_DECL(_kern_ipc_tls_stats);

static SYSCTL_NODE(_kern_ipc_tls_stats, OID_AUTO, ocf,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload via OCF stats");

static COUNTER_U64_DEFINE_EARLY(ocf_tls10_cbc_crypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls10_cbc_crypts,
    CTLFLAG_RD, &ocf_tls10_cbc_crypts,
    "Total number of OCF TLS 1.0 CBC encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls11_cbc_crypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_crypts,
    CTLFLAG_RD, &ocf_tls11_cbc_crypts,
    "Total number of OCF TLS 1.1/1.2 CBC encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_gcm_crypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_crypts,
    CTLFLAG_RD, &ocf_tls12_gcm_crypts,
    "Total number of OCF TLS 1.2 GCM encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls12_chacha20_crypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_chacha20_crypts,
    CTLFLAG_RD, &ocf_tls12_chacha20_crypts,
    "Total number of OCF TLS 1.2 Chacha20-Poly1305 encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_gcm_crypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_crypts,
    CTLFLAG_RD, &ocf_tls13_gcm_crypts,
    "Total number of OCF TLS 1.3 GCM encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_tls13_chacha20_crypts);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_chacha20_crypts,
    CTLFLAG_RD, &ocf_tls13_chacha20_crypts,
    "Total number of OCF TLS 1.3 Chacha20-Poly1305 encryption operations");

static COUNTER_U64_DEFINE_EARLY(ocf_inplace);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, inplace,
    CTLFLAG_RD, &ocf_inplace,
    "Total number of OCF in-place operations");

static COUNTER_U64_DEFINE_EARLY(ocf_separate_output);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, separate_output,
    CTLFLAG_RD, &ocf_separate_output,
    "Total number of OCF operations with a separate output buffer");

static COUNTER_U64_DEFINE_EARLY(ocf_retries);
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, retries, CTLFLAG_RD,
    &ocf_retries,
    "Number of OCF encryption operation retries");

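/*
 * Completion callbacks.  Synchronous OCF sessions finish the request
 * before crypto_dispatch() returns, so the callback is a no-op.
 * Asynchronous sessions complete from the driver's context: the
 * callback marks the operation done under the session lock and wakes
 * the thread sleeping in ktls_ocf_dispatch().
 */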
static int
ktls_ocf_callback_sync(struct cryptop *crp __unused)
{
	return (0);
}

static int
ktls_ocf_callback_async(struct cryptop *crp)
{
	struct ocf_operation *oo;

	oo = crp->crp_opaque;
	mtx_lock(&oo->os->lock);
	oo->done = true;
	mtx_unlock(&oo->os->lock);
	wakeup(oo);
	return (0);
}

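/*
 * Submit a request and wait for it to complete.  If the request
 * fails with EAGAIN (for example because the session was migrated to
 * another driver), reset the request and resubmit it, counting the
 * retry in the ocf_retries counter.
 */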
static int
ktls_ocf_dispatch(struct ocf_session *os, struct cryptop *crp)
{
	struct ocf_operation oo;
	int error;
	bool async;

	oo.os = os;
	oo.done = false;

	crp->crp_opaque = &oo;
	for (;;) {
		async = !CRYPTO_SESS_SYNC(crp->crp_session);
		crp->crp_callback = async ? ktls_ocf_callback_async :
		    ktls_ocf_callback_sync;

		error = crypto_dispatch(crp);
		if (error)
			break;
		if (async) {
			mtx_lock(&os->lock);
			while (!oo.done)
				mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0);
			mtx_unlock(&os->lock);
		}

		if (crp->crp_etype != EAGAIN) {
			error = crp->crp_etype;
			break;
		}

		crp->crp_etype = 0;
		crp->crp_flags &= ~CRYPTO_F_DONE;
		oo.done = false;
		counter_u64_add(ocf_retries, 1);
	}
	return (error);
}

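/*
 * MAC-then-encrypt path for AES-CBC suites.  The record is built in
 * two passes: first an HMAC over the MAC pseudo-header (sequence
 * number plus record header fields) and the plaintext is computed
 * into the caller-supplied trailer, then CBC padding is appended and
 * plaintext + MAC + padding are encrypted.  For TLS 1.0 the IV is
 * chained across records (implicit_iv): the last ciphertext block of
 * each record, saved from the trailer, becomes the next record's IV.
 * TLS 1.1/1.2 use the explicit per-record IV that follows the TLS
 * header.
 */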
static int
ktls_ocf_tls_cbc_encrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
    struct iovec *outiov, int iniovcnt, int outiovcnt, uint64_t seqno,
    uint8_t record_type __unused)
{
	struct uio uio, out_uio;
	struct tls_mac_data ad;
	struct cryptop crp;
	struct ocf_session *os;
	struct iovec iov[iniovcnt + 2];
	struct iovec out_iov[outiovcnt + 1];
	int i, error;
	uint16_t tls_comp_len;
	uint8_t pad;
	bool inplace;

	os = tls->cipher;

#ifdef INVARIANTS
	if (os->implicit_iv) {
		mtx_lock(&os->lock);
		KASSERT(!os->in_progress,
		    ("concurrent implicit IV encryptions"));
		if (os->next_seqno != seqno) {
			printf("KTLS CBC: TLS records out of order.  "
			    "Expected %ju, got %ju\n",
			    (uintmax_t)os->next_seqno, (uintmax_t)seqno);
			mtx_unlock(&os->lock);
			return (EINVAL);
		}
		os->in_progress = true;
		mtx_unlock(&os->lock);
	}
#endif

	/*
	 * Compute the payload length.
	 *
	 * XXX: This could easily be computed in O(1) from the mbuf
	 * fields, but we don't have those accessible here.  While we
	 * are walking the iovecs anyway, also determine whether the
	 * operation can be done in place.
	 */
	tls_comp_len = 0;
	inplace = iniovcnt == outiovcnt;
	for (i = 0; i < iniovcnt; i++) {
		tls_comp_len += iniov[i].iov_len;
		if (inplace &&
		    (i >= outiovcnt || iniov[i].iov_base != outiov[i].iov_base))
			inplace = false;
	}

	/* Initialize the AAD. */
	ad.seq = htobe64(seqno);
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = htons(tls_comp_len);

	/* First, compute the MAC. */
	iov[0].iov_base = &ad;
	iov[0].iov_len = sizeof(ad);
	memcpy(&iov[1], iniov, sizeof(*iniov) * iniovcnt);
	iov[iniovcnt + 1].iov_base = trailer;
	iov[iniovcnt + 1].iov_len = os->mac_len;
	uio.uio_iov = iov;
	uio.uio_iovcnt = iniovcnt + 2;
	uio.uio_offset = 0;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_td = curthread;
	uio.uio_resid = sizeof(ad) + tls_comp_len + os->mac_len;

	crypto_initreq(&crp, os->mac_sid);
	crp.crp_payload_start = 0;
	crp.crp_payload_length = sizeof(ad) + tls_comp_len;
	crp.crp_digest_start = crp.crp_payload_length;
	crp.crp_op = CRYPTO_OP_COMPUTE_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM;
	crypto_use_uio(&crp, &uio);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	if (error) {
#ifdef INVARIANTS
		if (os->implicit_iv) {
			mtx_lock(&os->lock);
			os->in_progress = false;
			mtx_unlock(&os->lock);
		}
#endif
		return (error);
	}

	/* Second, add the padding. */
	pad = (unsigned)(AES_BLOCK_LEN - (tls_comp_len + os->mac_len + 1)) %
	    AES_BLOCK_LEN;
	for (i = 0; i < pad + 1; i++)
		trailer[os->mac_len + i] = pad;

	/* Finally, encrypt the record. */

	/*
	 * Don't recopy the input iovec; instead just adjust the
	 * trailer length and skip over the AAD vector in the uio.
	 */
	iov[iniovcnt + 1].iov_len += pad + 1;
	uio.uio_iov = iov + 1;
	uio.uio_iovcnt = iniovcnt + 1;
	uio.uio_resid = tls_comp_len + iov[iniovcnt + 1].iov_len;
	KASSERT(uio.uio_resid % AES_BLOCK_LEN == 0,
	    ("invalid encryption size"));

	crypto_initreq(&crp, os->sid);
	crp.crp_payload_start = 0;
	crp.crp_payload_length = uio.uio_resid;
	crp.crp_op = CRYPTO_OP_ENCRYPT;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	if (os->implicit_iv)
		memcpy(crp.crp_iv, os->iv, AES_BLOCK_LEN);
	else
		memcpy(crp.crp_iv, hdr + 1, AES_BLOCK_LEN);
	crypto_use_uio(&crp, &uio);
	if (!inplace) {
		memcpy(out_iov, outiov, sizeof(*outiov) * outiovcnt);
		out_iov[outiovcnt] = iov[iniovcnt + 1];
		out_uio.uio_iov = out_iov;
		out_uio.uio_iovcnt = outiovcnt + 1;
		out_uio.uio_offset = 0;
		out_uio.uio_segflg = UIO_SYSSPACE;
		out_uio.uio_td = curthread;
		out_uio.uio_resid = uio.uio_resid;
		crypto_use_output_uio(&crp, &out_uio);
	}

	if (os->implicit_iv)
		counter_u64_add(ocf_tls10_cbc_crypts, 1);
	else
		counter_u64_add(ocf_tls11_cbc_crypts, 1);
	if (inplace)
		counter_u64_add(ocf_inplace, 1);
	else
		counter_u64_add(ocf_separate_output, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);

	if (os->implicit_iv) {
		KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN,
		    ("trailer too short to read IV"));
		memcpy(os->iv, trailer + os->mac_len + pad + 1 - AES_BLOCK_LEN,
		    AES_BLOCK_LEN);
#ifdef INVARIANTS
		mtx_lock(&os->lock);
		os->next_seqno = seqno + 1;
		os->in_progress = false;
		mtx_unlock(&os->lock);
#endif
	}
	return (error);
}

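/*
 * AEAD transmit path for TLS 1.2.  AES-GCM builds the nonce from the
 * 4-byte implicit salt followed by the 8-byte explicit nonce located
 * just past the TLS header, while Chacha20-Poly1305 XORs the
 * big-endian sequence number into the last 8 bytes of its 12-byte
 * IV.  The record header fields form the AAD, and the 16-byte tag
 * (AES_GMAC_HASH_LEN == POLY1305_HASH_LEN) is written through an
 * extra iovec pointing at the trailer.
 */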
static int
ktls_ocf_tls12_aead_encrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
    struct iovec *outiov, int iniovcnt, int outiovcnt, uint64_t seqno,
    uint8_t record_type __unused)
{
	struct uio uio, out_uio, *tag_uio;
	struct tls_aead_data ad;
	struct cryptop crp;
	struct ocf_session *os;
	struct iovec iov[outiovcnt + 1];
	int i, error;
	uint16_t tls_comp_len;
	bool inplace;

	os = tls->cipher;

	uio.uio_iov = iniov;
	uio.uio_iovcnt = iniovcnt;
	uio.uio_offset = 0;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_td = curthread;

	out_uio.uio_iov = outiov;
	out_uio.uio_iovcnt = outiovcnt;
	out_uio.uio_offset = 0;
	out_uio.uio_segflg = UIO_SYSSPACE;
	out_uio.uio_td = curthread;

	crypto_initreq(&crp, os->sid);

	/* Setup the IV. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
		memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
		memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
		    sizeof(uint64_t));
	} else {
		/*
		 * Chacha20-Poly1305 constructs the IV for TLS 1.2
		 * identically to constructing the IV for AEAD in TLS
		 * 1.3.
		 */
		memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
		*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
	}

	/* Setup the AAD. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		tls_comp_len = ntohs(hdr->tls_length) -
		    (AES_GMAC_HASH_LEN + sizeof(uint64_t));
	else
		tls_comp_len = ntohs(hdr->tls_length) - POLY1305_HASH_LEN;
	ad.seq = htobe64(seqno);
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = htons(tls_comp_len);
	crp.crp_aad = &ad;
	crp.crp_aad_length = sizeof(ad);

	/* Compute payload length and determine if encryption is in place. */
	inplace = iniovcnt == outiovcnt;
	crp.crp_payload_start = 0;
	for (i = 0; i < iniovcnt; i++) {
		if (inplace &&
		    (i >= outiovcnt || iniov[i].iov_base != outiov[i].iov_base))
			inplace = false;
		crp.crp_payload_length += iniov[i].iov_len;
	}
	uio.uio_resid = crp.crp_payload_length;
	out_uio.uio_resid = crp.crp_payload_length;

	if (inplace)
		tag_uio = &uio;
	else
		tag_uio = &out_uio;

	/* Duplicate iovec and append vector for tag. */
	memcpy(iov, tag_uio->uio_iov, outiovcnt * sizeof(struct iovec));
	iov[outiovcnt].iov_base = trailer;
	iov[outiovcnt].iov_len = AES_GMAC_HASH_LEN;
	tag_uio->uio_iov = iov;
	tag_uio->uio_iovcnt++;
	crp.crp_digest_start = tag_uio->uio_resid;
	tag_uio->uio_resid += AES_GMAC_HASH_LEN;

	crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_uio(&crp, &uio);
	if (!inplace)
		crypto_use_output_uio(&crp, &out_uio);

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls12_gcm_crypts, 1);
	else
		counter_u64_add(ocf_tls12_chacha20_crypts, 1);
	if (inplace)
		counter_u64_add(ocf_inplace, 1);
	else
		counter_u64_add(ocf_separate_output, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	return (error);
}

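/*
 * AEAD receive path for TLS 1.2.  The nonce and AAD are constructed
 * exactly as on the transmit side, but the record is decrypted in
 * place within the mbuf chain and OCF is asked to verify the tag
 * (CRYPTO_OP_VERIFY_DIGEST) rather than compute it.
 */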
static int
ktls_ocf_tls12_aead_decrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
    int *trailer_len)
{
	struct tls_aead_data ad;
	struct cryptop crp;
	struct ocf_session *os;
	struct ocf_operation oo;
	int error;
	uint16_t tls_comp_len;

	os = tls->cipher;

	oo.os = os;
	oo.done = false;

	crypto_initreq(&crp, os->sid);

	/* Setup the IV. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
		memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
		memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1,
		    sizeof(uint64_t));
	} else {
		/*
		 * Chacha20-Poly1305 constructs the IV for TLS 1.2
		 * identically to constructing the IV for AEAD in TLS
		 * 1.3.
		 */
		memcpy(crp.crp_iv, tls->params.iv, tls->params.iv_len);
		*(uint64_t *)(crp.crp_iv + 4) ^= htobe64(seqno);
	}

	/* Setup the AAD. */
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		tls_comp_len = ntohs(hdr->tls_length) -
		    (AES_GMAC_HASH_LEN + sizeof(uint64_t));
	else
		tls_comp_len = ntohs(hdr->tls_length) - POLY1305_HASH_LEN;
	ad.seq = htobe64(seqno);
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = htons(tls_comp_len);
	crp.crp_aad = &ad;
	crp.crp_aad_length = sizeof(ad);

	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = tls_comp_len;
	crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;

	crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls12_gcm_crypts, 1);
	else
		counter_u64_add(ocf_tls12_chacha20_crypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	*trailer_len = AES_GMAC_HASH_LEN;
	return (error);
}

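/*
 * AEAD transmit path for TLS 1.3.  The per-record nonce is derived
 * from the static IV and the sequence number:
 *
 *	nonce[0..3]  = iv[0..3]
 *	nonce[4..11] = iv[4..11] ^ be64(seqno)
 *
 * The AAD is the outer record header exactly as it will appear on
 * the wire, and the true record type is stored as the first byte of
 * the trailer so that it is encrypted along with the payload.
 */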
static int
ktls_ocf_tls13_aead_encrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
    struct iovec *outiov, int iniovcnt, int outiovcnt, uint64_t seqno,
    uint8_t record_type)
{
	struct uio uio, out_uio;
	struct tls_aead_data_13 ad;
	char nonce[12];
	struct cryptop crp;
	struct ocf_session *os;
	struct iovec iov[iniovcnt + 1], out_iov[outiovcnt + 1];
	int i, error;
	bool inplace;

	os = tls->cipher;

	crypto_initreq(&crp, os->sid);

	/* Setup the nonce. */
	memcpy(nonce, tls->params.iv, tls->params.iv_len);
	*(uint64_t *)(nonce + 4) ^= htobe64(seqno);

	/* Setup the AAD. */
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = hdr->tls_length;
	crp.crp_aad = &ad;
	crp.crp_aad_length = sizeof(ad);

	/* Compute payload length and determine if encryption is in place. */
	inplace = iniovcnt == outiovcnt;
	crp.crp_payload_start = 0;
	for (i = 0; i < iniovcnt; i++) {
		if (inplace && (i >= outiovcnt ||
		    iniov[i].iov_base != outiov[i].iov_base))
			inplace = false;
		crp.crp_payload_length += iniov[i].iov_len;
	}

	/* Store the record type as the first byte of the trailer. */
	trailer[0] = record_type;
	crp.crp_payload_length++;
	crp.crp_digest_start = crp.crp_payload_length;

	/*
	 * Duplicate the input iov to append the trailer.  Always
	 * include the full trailer as input to get the record_type
	 * even if only the first byte is used.
	 */
	memcpy(iov, iniov, iniovcnt * sizeof(*iov));
	iov[iniovcnt].iov_base = trailer;
	iov[iniovcnt].iov_len = tls->params.tls_tlen;
	uio.uio_iov = iov;
	uio.uio_iovcnt = iniovcnt + 1;
	uio.uio_offset = 0;
	uio.uio_resid = crp.crp_payload_length + tls->params.tls_tlen - 1;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_td = curthread;
	crypto_use_uio(&crp, &uio);

	if (!inplace) {
		/* Duplicate the output iov to append the trailer. */
		memcpy(out_iov, outiov, outiovcnt * sizeof(*out_iov));
		out_iov[outiovcnt] = iov[iniovcnt];

		out_uio.uio_iov = out_iov;
		out_uio.uio_iovcnt = outiovcnt + 1;
		out_uio.uio_offset = 0;
		out_uio.uio_resid = crp.crp_payload_length +
		    tls->params.tls_tlen - 1;
		out_uio.uio_segflg = UIO_SYSSPACE;
		out_uio.uio_td = curthread;
		crypto_use_output_uio(&crp, &out_uio);
	}

	crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;

	memcpy(crp.crp_iv, nonce, sizeof(nonce));

	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16)
		counter_u64_add(ocf_tls13_gcm_crypts, 1);
	else
		counter_u64_add(ocf_tls13_chacha20_crypts, 1);
	if (inplace)
		counter_u64_add(ocf_inplace, 1);
	else
		counter_u64_add(ocf_separate_output, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	return (error);
}

static void
ktls_ocf_free(struct ktls_session *tls)
{
	struct ocf_session *os;

	os = tls->cipher;
	crypto_freesession(os->sid);
	/* MAC-then-encrypt suites also hold a separate digest session. */
	if (os->mac_sid != NULL)
		crypto_freesession(os->mac_sid);
	mtx_destroy(&os->lock);
	zfree(os, M_KTLS_OCF);
}

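/*
 * Session setup.  ktls_ocf_try() validates the cipher suite and TLS
 * version, fills in crypto_session_params for the cipher (plus a
 * second digest session for AES-CBC suites), and creates the OCF
 * sessions, allowing either hardware or software drivers.  On
 * success it points the ktls_session at the appropriate encrypt or
 * decrypt handler above.
 */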
static int
ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction)
{
	struct crypto_session_params csp, mac_csp;
	struct ocf_session *os;
	int error, mac_len;

	memset(&csp, 0, sizeof(csp));
	memset(&mac_csp, 0, sizeof(mac_csp));
	mac_csp.csp_mode = CSP_MODE_NONE;
	mac_len = 0;

	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.2 and 1.3 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		/* TLS 1.3 is not yet supported for receive. */
		if (direction == KTLS_RX &&
		    tls->params.tls_vminor == TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
		csp.csp_mode = CSP_MODE_AEAD;
		csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = AES_GCM_IV_LEN;
		break;
	case CRYPTO_AES_CBC:
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		switch (tls->params.auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			mac_len = SHA1_HASH_LEN;
			break;
		case CRYPTO_SHA2_256_HMAC:
			mac_len = SHA2_256_HASH_LEN;
			break;
		case CRYPTO_SHA2_384_HMAC:
			mac_len = SHA2_384_HASH_LEN;
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.0-1.2 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_ZERO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_TWO)
			return (EPROTONOSUPPORT);

		/* AES-CBC is not supported for receive. */
		if (direction == KTLS_RX)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		csp.csp_mode = CSP_MODE_CIPHER;
		csp.csp_cipher_alg = CRYPTO_AES_CBC;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = AES_BLOCK_LEN;

		mac_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		mac_csp.csp_mode = CSP_MODE_DIGEST;
		mac_csp.csp_auth_alg = tls->params.auth_algorithm;
		mac_csp.csp_auth_key = tls->params.auth_key;
		mac_csp.csp_auth_klen = tls->params.auth_key_len;
		break;
	case CRYPTO_CHACHA20_POLY1305:
		switch (tls->params.cipher_key_len) {
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.2 and 1.3 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		/* TLS 1.3 is not yet supported for receive. */
		if (direction == KTLS_RX &&
		    tls->params.tls_vminor == TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
		csp.csp_mode = CSP_MODE_AEAD;
		csp.csp_cipher_alg = CRYPTO_CHACHA20_POLY1305;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = CHACHA20_POLY1305_IV_LEN;
		break;
	default:
		return (EPROTONOSUPPORT);
	}

	os = malloc(sizeof(*os), M_KTLS_OCF, M_NOWAIT | M_ZERO);
	if (os == NULL)
		return (ENOMEM);

	error = crypto_newsession(&os->sid, &csp,
	    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
	if (error) {
		free(os, M_KTLS_OCF);
		return (error);
	}

	if (mac_csp.csp_mode != CSP_MODE_NONE) {
		error = crypto_newsession(&os->mac_sid, &mac_csp,
		    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
		if (error) {
			crypto_freesession(os->sid);
			free(os, M_KTLS_OCF);
			return (error);
		}
		os->mac_len = mac_len;
	}

	mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
	tls->cipher = os;
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16 ||
	    tls->params.cipher_algorithm == CRYPTO_CHACHA20_POLY1305) {
		if (direction == KTLS_TX) {
			if (tls->params.tls_vminor == TLS_MINOR_VER_THREE)
				tls->sw_encrypt = ktls_ocf_tls13_aead_encrypt;
			else
				tls->sw_encrypt = ktls_ocf_tls12_aead_encrypt;
		} else {
			tls->sw_decrypt = ktls_ocf_tls12_aead_decrypt;
		}
	} else {
		tls->sw_encrypt = ktls_ocf_tls_cbc_encrypt;
		if (tls->params.tls_vminor == TLS_MINOR_VER_ZERO) {
			os->implicit_iv = true;
			memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN);
		}
	}
	tls->free = ktls_ocf_free;
	return (0);
}

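/*
 * The backend registers with the generic KTLS code at module load;
 * "prio" orders it relative to any other software KTLS backends that
 * could also handle a session.
 */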
struct ktls_crypto_backend ocf_backend = {
	.name = "OCF",
	.prio = 5,
	.api_version = KTLS_API_VERSION,
	.try = ktls_ocf_try,
};

static int
ktls_ocf_modevent(module_t mod, int what, void *arg)
{
	switch (what) {
	case MOD_LOAD:
		return (ktls_crypto_backend_register(&ocf_backend));
	case MOD_UNLOAD:
		return (ktls_crypto_backend_deregister(&ocf_backend));
	default:
		return (EOPNOTSUPP);
	}
}

static moduledata_t ktls_ocf_moduledata = {
	"ktls_ocf",
	ktls_ocf_modevent,
	NULL
};

DECLARE_MODULE(ktls_ocf, ktls_ocf_moduledata, SI_SUB_PROTO_END, SI_ORDER_ANY);