/* /freebsd/sys/opencrypto/ktls_ocf.c (revision c66ec88fed842fbaad62c30d510644ceb7bd2d71) */
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2019 Netflix Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/endian.h>
#include <sys/kernel.h>
#include <sys/ktls.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <opencrypto/cryptodev.h>

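/*
 * Per-session state for the OCF backend: an OCF session for the cipher
 * (or AEAD) and, for AES-CBC MAC-then-encrypt suites, a second session
 * for the HMAC.  For TLS 1.0 the implicit IV (the last cipher block of
 * the previous record) is carried here between records.
 */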
struct ocf_session {
	crypto_session_t sid;
	crypto_session_t mac_sid;
	int mac_len;
	struct mtx lock;
	bool implicit_iv;

	/* Only used for TLS 1.0 with the implicit IV. */
#ifdef INVARIANTS
	bool in_progress;
	uint64_t next_seqno;
#endif
	char iv[AES_BLOCK_LEN];
};

struct ocf_operation {
	struct ocf_session *os;
	bool done;
};

static MALLOC_DEFINE(M_KTLS_OCF, "ktls_ocf", "OCF KTLS");

SYSCTL_DECL(_kern_ipc_tls);
SYSCTL_DECL(_kern_ipc_tls_stats);

static SYSCTL_NODE(_kern_ipc_tls_stats, OID_AUTO, ocf,
    CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Kernel TLS offload via OCF stats");

static counter_u64_t ocf_tls10_cbc_crypts;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls10_cbc_crypts,
    CTLFLAG_RD, &ocf_tls10_cbc_crypts,
    "Total number of OCF TLS 1.0 CBC encryption operations");

static counter_u64_t ocf_tls11_cbc_crypts;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls11_cbc_crypts,
    CTLFLAG_RD, &ocf_tls11_cbc_crypts,
    "Total number of OCF TLS 1.1/1.2 CBC encryption operations");

static counter_u64_t ocf_tls12_gcm_crypts;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls12_gcm_crypts,
    CTLFLAG_RD, &ocf_tls12_gcm_crypts,
    "Total number of OCF TLS 1.2 GCM encryption operations");

static counter_u64_t ocf_tls13_gcm_crypts;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, tls13_gcm_crypts,
    CTLFLAG_RD, &ocf_tls13_gcm_crypts,
    "Total number of OCF TLS 1.3 GCM encryption operations");

static counter_u64_t ocf_inplace;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, inplace,
    CTLFLAG_RD, &ocf_inplace,
    "Total number of OCF in-place operations");

static counter_u64_t ocf_separate_output;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, separate_output,
    CTLFLAG_RD, &ocf_separate_output,
    "Total number of OCF operations with a separate output buffer");

static counter_u64_t ocf_retries;
SYSCTL_COUNTER_U64(_kern_ipc_tls_stats_ocf, OID_AUTO, retries, CTLFLAG_RD,
    &ocf_retries,
    "Number of OCF encryption operation retries");

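/*
 * Completion callback invoked by OCF when a request finishes: mark the
 * operation done under the session lock and wake the thread sleeping
 * in ktls_ocf_dispatch().
 */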
static int
ktls_ocf_callback(struct cryptop *crp)
{
	struct ocf_operation *oo;

	oo = crp->crp_opaque;
	mtx_lock(&oo->os->lock);
	oo->done = true;
	mtx_unlock(&oo->os->lock);
	wakeup(oo);
	return (0);
}

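/*
 * Run a crypto request synchronously: dispatch it to OCF, sleep until
 * the callback reports completion, and resubmit the request whenever
 * it completes with EAGAIN (counted in the "retries" statistic).
 */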
static int
ktls_ocf_dispatch(struct ocf_session *os, struct cryptop *crp)
{
	struct ocf_operation oo;
	int error;

	oo.os = os;
	oo.done = false;

	crp->crp_opaque = &oo;
	crp->crp_callback = ktls_ocf_callback;
	for (;;) {
		error = crypto_dispatch(crp);
		if (error)
			break;

		mtx_lock(&os->lock);
		while (!oo.done)
			mtx_sleep(&oo, &os->lock, 0, "ocfktls", 0);
		mtx_unlock(&os->lock);

		if (crp->crp_etype != EAGAIN) {
			error = crp->crp_etype;
			break;
		}

		crp->crp_etype = 0;
		crp->crp_flags &= ~CRYPTO_F_DONE;
		oo.done = false;
		counter_u64_add(ocf_retries, 1);
	}
	return (error);
}

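/*
 * Encrypt a MAC-then-encrypt (AES-CBC + HMAC) record.  The HMAC over
 * the pseudo-header and payload is computed first and written into the
 * trailer, the CBC padding is appended, and then payload, MAC, and
 * padding are encrypted in a second OCF request.  TLS 1.0 chains the
 * IV from the previous record; TLS 1.1/1.2 take it from the explicit
 * IV following the record header.
 */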
static int
ktls_ocf_tls_cbc_encrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
    struct iovec *outiov, int iovcnt, uint64_t seqno,
    uint8_t record_type __unused)
{
	struct uio uio, out_uio;
	struct tls_mac_data ad;
	struct cryptop crp;
	struct ocf_session *os;
	struct iovec iov[iovcnt + 2];
	struct iovec out_iov[iovcnt + 1];
	int i, error;
	uint16_t tls_comp_len;
	uint8_t pad;
	bool inplace;

	os = tls->cipher;

#ifdef INVARIANTS
	if (os->implicit_iv) {
		mtx_lock(&os->lock);
		KASSERT(!os->in_progress,
		    ("concurrent implicit IV encryptions"));
		if (os->next_seqno != seqno) {
			printf("KTLS CBC: TLS records out of order.  "
			    "Expected %ju, got %ju\n",
			    (uintmax_t)os->next_seqno, (uintmax_t)seqno);
			mtx_unlock(&os->lock);
			return (EINVAL);
		}
		os->in_progress = true;
		mtx_unlock(&os->lock);
	}
#endif

	/*
	 * Compute the payload length.
	 *
	 * XXX: This could be easily computed O(1) from the mbuf
	 * fields, but we don't have those accessible here.  Can
	 * at least compute inplace as well while we are here.
	 */
	tls_comp_len = 0;
	inplace = true;
	for (i = 0; i < iovcnt; i++) {
		tls_comp_len += iniov[i].iov_len;
		if (iniov[i].iov_base != outiov[i].iov_base)
			inplace = false;
	}

	/* Initialize the AAD. */
	ad.seq = htobe64(seqno);
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = htons(tls_comp_len);

	/* First, compute the MAC. */
	iov[0].iov_base = &ad;
	iov[0].iov_len = sizeof(ad);
	memcpy(&iov[1], iniov, sizeof(*iniov) * iovcnt);
	iov[iovcnt + 1].iov_base = trailer;
	iov[iovcnt + 1].iov_len = os->mac_len;
	uio.uio_iov = iov;
	uio.uio_iovcnt = iovcnt + 2;
	uio.uio_offset = 0;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_td = curthread;
	uio.uio_resid = sizeof(ad) + tls_comp_len + os->mac_len;

	crypto_initreq(&crp, os->mac_sid);
	crp.crp_payload_start = 0;
	crp.crp_payload_length = sizeof(ad) + tls_comp_len;
	crp.crp_digest_start = crp.crp_payload_length;
	crp.crp_op = CRYPTO_OP_COMPUTE_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM;
	crypto_use_uio(&crp, &uio);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	if (error) {
#ifdef INVARIANTS
		if (os->implicit_iv) {
			mtx_lock(&os->lock);
			os->in_progress = false;
			mtx_unlock(&os->lock);
		}
#endif
		return (error);
	}

	/* Second, add the padding. */
	pad = (unsigned)(AES_BLOCK_LEN - (tls_comp_len + os->mac_len + 1)) %
	    AES_BLOCK_LEN;
	for (i = 0; i < pad + 1; i++)
		trailer[os->mac_len + i] = pad;

	/* Finally, encrypt the record. */

	/*
	 * Don't recopy the input iovec, instead just adjust the
	 * trailer length and skip over the AAD vector in the uio.
	 */
	iov[iovcnt + 1].iov_len += pad + 1;
	uio.uio_iov = iov + 1;
	uio.uio_iovcnt = iovcnt + 1;
	uio.uio_resid = tls_comp_len + iov[iovcnt + 1].iov_len;
	KASSERT(uio.uio_resid % AES_BLOCK_LEN == 0,
	    ("invalid encryption size"));

	crypto_initreq(&crp, os->sid);
	crp.crp_payload_start = 0;
	crp.crp_payload_length = uio.uio_resid;
	crp.crp_op = CRYPTO_OP_ENCRYPT;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	if (os->implicit_iv)
		memcpy(crp.crp_iv, os->iv, AES_BLOCK_LEN);
	else
		memcpy(crp.crp_iv, hdr + 1, AES_BLOCK_LEN);
	crypto_use_uio(&crp, &uio);
	if (!inplace) {
		memcpy(out_iov, outiov, sizeof(*iniov) * iovcnt);
		out_iov[iovcnt] = iov[iovcnt + 1];
		out_uio.uio_iov = out_iov;
		out_uio.uio_iovcnt = iovcnt + 1;
		out_uio.uio_offset = 0;
		out_uio.uio_segflg = UIO_SYSSPACE;
		out_uio.uio_td = curthread;
		out_uio.uio_resid = uio.uio_resid;
		crypto_use_output_uio(&crp, &out_uio);
	}

	if (os->implicit_iv)
		counter_u64_add(ocf_tls10_cbc_crypts, 1);
	else
		counter_u64_add(ocf_tls11_cbc_crypts, 1);
	if (inplace)
		counter_u64_add(ocf_inplace, 1);
	else
		counter_u64_add(ocf_separate_output, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);

	if (os->implicit_iv) {
		KASSERT(os->mac_len + pad + 1 >= AES_BLOCK_LEN,
		    ("trailer too short to read IV"));
		memcpy(os->iv, trailer + os->mac_len + pad + 1 - AES_BLOCK_LEN,
		    AES_BLOCK_LEN);
#ifdef INVARIANTS
		mtx_lock(&os->lock);
		os->next_seqno = seqno + 1;
		os->in_progress = false;
		mtx_unlock(&os->lock);
#endif
	}
	return (error);
}

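/*
 * Encrypt a TLS 1.2 AES-GCM record.  The TLS header fields form the
 * AAD, the nonce is the 4-byte implicit salt from the session followed
 * by the 8-byte explicit nonce taken from the record header, and the
 * GCM tag is appended to the output via an extra iovec pointing at the
 * trailer.
 */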
static int
ktls_ocf_tls12_gcm_encrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
    struct iovec *outiov, int iovcnt, uint64_t seqno,
    uint8_t record_type __unused)
{
	struct uio uio, out_uio, *tag_uio;
	struct tls_aead_data ad;
	struct cryptop crp;
	struct ocf_session *os;
	struct iovec iov[iovcnt + 1];
	int i, error;
	uint16_t tls_comp_len;
	bool inplace;

	os = tls->cipher;

	uio.uio_iov = iniov;
	uio.uio_iovcnt = iovcnt;
	uio.uio_offset = 0;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_td = curthread;

	out_uio.uio_iov = outiov;
	out_uio.uio_iovcnt = iovcnt;
	out_uio.uio_offset = 0;
	out_uio.uio_segflg = UIO_SYSSPACE;
	out_uio.uio_td = curthread;

	crypto_initreq(&crp, os->sid);

	/* Setup the IV. */
	memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
	memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));

	/* Setup the AAD. */
	tls_comp_len = ntohs(hdr->tls_length) -
	    (AES_GMAC_HASH_LEN + sizeof(uint64_t));
	ad.seq = htobe64(seqno);
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = htons(tls_comp_len);
	crp.crp_aad = &ad;
	crp.crp_aad_length = sizeof(ad);

	/* Compute payload length and determine if encryption is in place. */
	inplace = true;
	crp.crp_payload_start = 0;
	for (i = 0; i < iovcnt; i++) {
		if (iniov[i].iov_base != outiov[i].iov_base)
			inplace = false;
		crp.crp_payload_length += iniov[i].iov_len;
	}
	uio.uio_resid = crp.crp_payload_length;
	out_uio.uio_resid = crp.crp_payload_length;

	if (inplace)
		tag_uio = &uio;
	else
		tag_uio = &out_uio;

	/* Duplicate iovec and append vector for tag. */
	memcpy(iov, tag_uio->uio_iov, iovcnt * sizeof(struct iovec));
	iov[iovcnt].iov_base = trailer;
	iov[iovcnt].iov_len = AES_GMAC_HASH_LEN;
	tag_uio->uio_iov = iov;
	tag_uio->uio_iovcnt++;
	crp.crp_digest_start = tag_uio->uio_resid;
	tag_uio->uio_resid += AES_GMAC_HASH_LEN;

	crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_uio(&crp, &uio);
	if (!inplace)
		crypto_use_output_uio(&crp, &out_uio);

	counter_u64_add(ocf_tls12_gcm_crypts, 1);
	if (inplace)
		counter_u64_add(ocf_inplace, 1);
	else
		counter_u64_add(ocf_separate_output, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	return (error);
}

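/*
 * Decrypt and authenticate a TLS 1.2 AES-GCM record in place in the
 * supplied mbuf chain.  The nonce and AAD are reconstructed the same
 * way as on the transmit side, and OCF verifies the GCM tag located
 * just past the payload.
 */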
static int
ktls_ocf_tls12_gcm_decrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, struct mbuf *m, uint64_t seqno,
    int *trailer_len)
{
	struct tls_aead_data ad;
	struct cryptop crp;
	struct ocf_session *os;
	struct ocf_operation oo;
	int error;
	uint16_t tls_comp_len;

	os = tls->cipher;

	oo.os = os;
	oo.done = false;

	crypto_initreq(&crp, os->sid);

	/* Setup the IV. */
	memcpy(crp.crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
	memcpy(crp.crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));

	/* Setup the AAD. */
	tls_comp_len = ntohs(hdr->tls_length) -
	    (AES_GMAC_HASH_LEN + sizeof(uint64_t));
	ad.seq = htobe64(seqno);
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = htons(tls_comp_len);
	crp.crp_aad = &ad;
	crp.crp_aad_length = sizeof(ad);

	crp.crp_payload_start = tls->params.tls_hlen;
	crp.crp_payload_length = tls_comp_len;
	crp.crp_digest_start = crp.crp_payload_start + crp.crp_payload_length;

	crp.crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
	crypto_use_mbuf(&crp, m);

	counter_u64_add(ocf_tls12_gcm_crypts, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	*trailer_len = AES_GMAC_HASH_LEN;
	return (error);
}

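/*
 * Encrypt a TLS 1.3 AES-GCM record.  The per-record nonce is the
 * session IV XORed with the big-endian sequence number, the unmodified
 * record header fields serve as the AAD, and the real record type is
 * appended to the payload as the first trailer byte before the GCM tag
 * is computed.
 */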
static int
ktls_ocf_tls13_gcm_encrypt(struct ktls_session *tls,
    const struct tls_record_layer *hdr, uint8_t *trailer, struct iovec *iniov,
    struct iovec *outiov, int iovcnt, uint64_t seqno, uint8_t record_type)
{
	struct uio uio, out_uio;
	struct tls_aead_data_13 ad;
	char nonce[12];
	struct cryptop crp;
	struct ocf_session *os;
	struct iovec iov[iovcnt + 1], out_iov[iovcnt + 1];
	int i, error;
	bool inplace;

	os = tls->cipher;

	crypto_initreq(&crp, os->sid);

	/* Setup the nonce. */
	memcpy(nonce, tls->params.iv, tls->params.iv_len);
	*(uint64_t *)(nonce + 4) ^= htobe64(seqno);

	/* Setup the AAD. */
	ad.type = hdr->tls_type;
	ad.tls_vmajor = hdr->tls_vmajor;
	ad.tls_vminor = hdr->tls_vminor;
	ad.tls_length = hdr->tls_length;
	crp.crp_aad = &ad;
	crp.crp_aad_length = sizeof(ad);

	/* Compute payload length and determine if encryption is in place. */
	inplace = true;
	crp.crp_payload_start = 0;
	for (i = 0; i < iovcnt; i++) {
		if (iniov[i].iov_base != outiov[i].iov_base)
			inplace = false;
		crp.crp_payload_length += iniov[i].iov_len;
	}

	/* Store the record type as the first byte of the trailer. */
	trailer[0] = record_type;
	crp.crp_payload_length++;
	crp.crp_digest_start = crp.crp_payload_length;

	/*
	 * Duplicate the input iov to append the trailer.  Always
	 * include the full trailer as input to get the record_type
	 * even if only the first byte is used.
	 */
	memcpy(iov, iniov, iovcnt * sizeof(*iov));
	iov[iovcnt].iov_base = trailer;
	iov[iovcnt].iov_len = AES_GMAC_HASH_LEN + 1;
	uio.uio_iov = iov;
	uio.uio_iovcnt = iovcnt + 1;
	uio.uio_offset = 0;
	uio.uio_resid = crp.crp_payload_length + AES_GMAC_HASH_LEN;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_td = curthread;
	crypto_use_uio(&crp, &uio);

	if (!inplace) {
		/* Duplicate the output iov to append the trailer. */
		memcpy(out_iov, outiov, iovcnt * sizeof(*out_iov));
		out_iov[iovcnt] = iov[iovcnt];

		out_uio.uio_iov = out_iov;
		out_uio.uio_iovcnt = iovcnt + 1;
		out_uio.uio_offset = 0;
		out_uio.uio_resid = crp.crp_payload_length +
		    AES_GMAC_HASH_LEN;
		out_uio.uio_segflg = UIO_SYSSPACE;
		out_uio.uio_td = curthread;
		crypto_use_output_uio(&crp, &out_uio);
	}

	crp.crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	crp.crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;

	memcpy(crp.crp_iv, nonce, sizeof(nonce));

	counter_u64_add(ocf_tls13_gcm_crypts, 1);
	if (inplace)
		counter_u64_add(ocf_inplace, 1);
	else
		counter_u64_add(ocf_separate_output, 1);
	error = ktls_ocf_dispatch(os, &crp);

	crypto_destroyreq(&crp);
	return (error);
}

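/*
 * Release the OCF session state when a KTLS session is destroyed,
 * freeing the cipher session and, for MAC-then-encrypt suites, the
 * separate digest session.
 */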
static void
ktls_ocf_free(struct ktls_session *tls)
{
	struct ocf_session *os;

	os = tls->cipher;
	if (os->mac_sid != NULL)
		crypto_freesession(os->mac_sid);
	crypto_freesession(os->sid);
	mtx_destroy(&os->lock);
	zfree(os, M_KTLS_OCF);
}

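/*
 * Probe whether this KTLS session can be handled by OCF.  Validate the
 * cipher suite and TLS version, create the OCF session(s), and hook up
 * the appropriate encrypt/decrypt routines and the free callback.
 */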
static int
ktls_ocf_try(struct socket *so, struct ktls_session *tls, int direction)
{
	struct crypto_session_params csp, mac_csp;
	struct ocf_session *os;
	int error, mac_len;

	memset(&csp, 0, sizeof(csp));
	memset(&mac_csp, 0, sizeof(mac_csp));
	mac_csp.csp_mode = CSP_MODE_NONE;
	mac_len = 0;

	switch (tls->params.cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.2 and 1.3 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_TWO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		/* TLS 1.3 is not yet supported for receive. */
		if (direction == KTLS_RX &&
		    tls->params.tls_vminor == TLS_MINOR_VER_THREE)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD;
		csp.csp_mode = CSP_MODE_AEAD;
		csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = AES_GCM_IV_LEN;
		break;
	case CRYPTO_AES_CBC:
		switch (tls->params.cipher_key_len) {
		case 128 / 8:
		case 256 / 8:
			break;
		default:
			return (EINVAL);
		}

		switch (tls->params.auth_algorithm) {
		case CRYPTO_SHA1_HMAC:
			mac_len = SHA1_HASH_LEN;
			break;
		case CRYPTO_SHA2_256_HMAC:
			mac_len = SHA2_256_HASH_LEN;
			break;
		case CRYPTO_SHA2_384_HMAC:
			mac_len = SHA2_384_HASH_LEN;
			break;
		default:
			return (EINVAL);
		}

		/* Only TLS 1.0-1.2 are supported. */
		if (tls->params.tls_vmajor != TLS_MAJOR_VER_ONE ||
		    tls->params.tls_vminor < TLS_MINOR_VER_ZERO ||
		    tls->params.tls_vminor > TLS_MINOR_VER_TWO)
			return (EPROTONOSUPPORT);

		/* AES-CBC is not supported for receive. */
		if (direction == KTLS_RX)
			return (EPROTONOSUPPORT);

		csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		csp.csp_mode = CSP_MODE_CIPHER;
		csp.csp_cipher_alg = CRYPTO_AES_CBC;
		csp.csp_cipher_key = tls->params.cipher_key;
		csp.csp_cipher_klen = tls->params.cipher_key_len;
		csp.csp_ivlen = AES_BLOCK_LEN;

		mac_csp.csp_flags |= CSP_F_SEPARATE_OUTPUT;
		mac_csp.csp_mode = CSP_MODE_DIGEST;
		mac_csp.csp_auth_alg = tls->params.auth_algorithm;
		mac_csp.csp_auth_key = tls->params.auth_key;
		mac_csp.csp_auth_klen = tls->params.auth_key_len;
		break;
	default:
		return (EPROTONOSUPPORT);
	}

	os = malloc(sizeof(*os), M_KTLS_OCF, M_NOWAIT | M_ZERO);
	if (os == NULL)
		return (ENOMEM);

	error = crypto_newsession(&os->sid, &csp,
	    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
	if (error) {
		free(os, M_KTLS_OCF);
		return (error);
	}

	if (mac_csp.csp_mode != CSP_MODE_NONE) {
		error = crypto_newsession(&os->mac_sid, &mac_csp,
		    CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
		if (error) {
			crypto_freesession(os->sid);
			free(os, M_KTLS_OCF);
			return (error);
		}
		os->mac_len = mac_len;
	}

	mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
	tls->cipher = os;
	if (tls->params.cipher_algorithm == CRYPTO_AES_NIST_GCM_16) {
		if (direction == KTLS_TX) {
			if (tls->params.tls_vminor == TLS_MINOR_VER_THREE)
				tls->sw_encrypt = ktls_ocf_tls13_gcm_encrypt;
			else
				tls->sw_encrypt = ktls_ocf_tls12_gcm_encrypt;
		} else {
			tls->sw_decrypt = ktls_ocf_tls12_gcm_decrypt;
		}
	} else {
		tls->sw_encrypt = ktls_ocf_tls_cbc_encrypt;
		if (tls->params.tls_vminor == TLS_MINOR_VER_ZERO) {
			os->implicit_iv = true;
			memcpy(os->iv, tls->params.iv, AES_BLOCK_LEN);
		}
	}
	tls->free = ktls_ocf_free;
	return (0);
}

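/*
 * Backend descriptor registered with the KTLS framework so that
 * software KTLS sessions can be serviced via OCF.
 */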
struct ktls_crypto_backend ocf_backend = {
	.name = "OCF",
	.prio = 5,
	.api_version = KTLS_API_VERSION,
	.try = ktls_ocf_try,
};

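/*
 * Module event handler: allocate the statistics counters and register
 * the backend on load; deregister the backend and free the counters on
 * unload.
 */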
static int
ktls_ocf_modevent(module_t mod, int what, void *arg)
{
	int error;

	switch (what) {
	case MOD_LOAD:
		ocf_tls10_cbc_crypts = counter_u64_alloc(M_WAITOK);
		ocf_tls11_cbc_crypts = counter_u64_alloc(M_WAITOK);
		ocf_tls12_gcm_crypts = counter_u64_alloc(M_WAITOK);
		ocf_tls13_gcm_crypts = counter_u64_alloc(M_WAITOK);
		ocf_inplace = counter_u64_alloc(M_WAITOK);
		ocf_separate_output = counter_u64_alloc(M_WAITOK);
		ocf_retries = counter_u64_alloc(M_WAITOK);
		return (ktls_crypto_backend_register(&ocf_backend));
	case MOD_UNLOAD:
		error = ktls_crypto_backend_deregister(&ocf_backend);
		if (error)
			return (error);
		counter_u64_free(ocf_tls10_cbc_crypts);
		counter_u64_free(ocf_tls11_cbc_crypts);
		counter_u64_free(ocf_tls12_gcm_crypts);
		counter_u64_free(ocf_tls13_gcm_crypts);
		counter_u64_free(ocf_inplace);
		counter_u64_free(ocf_separate_output);
		counter_u64_free(ocf_retries);
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

static moduledata_t ktls_ocf_moduledata = {
	"ktls_ocf",
	ktls_ocf_modevent,
	NULL
};

DECLARE_MODULE(ktls_ocf, ktls_ocf_moduledata, SI_SUB_PROTO_END, SI_ORDER_ANY);