/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>
#include <sys/mutex.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>
#include <sys/md5.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

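/*
 * Per-session state for the software driver: precomputed auth
 * contexts, the cipher key schedule, and the compression transform,
 * plus the handler that services requests for this session.
 */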
struct swcr_auth {
	void		*sw_ictx;
	void		*sw_octx;
	struct auth_hash *sw_axf;
	uint16_t	sw_mlen;
	uint16_t	sw_octx_len;
};

struct swcr_encdec {
	uint8_t		*sw_kschedule;
	struct enc_xform *sw_exf;
};

struct swcr_compdec {
	struct comp_algo *sw_cxf;
};

struct swcr_session {
	struct mtx	swcr_lock;
	int	(*swcr_process)(struct swcr_session *, struct cryptop *);

	struct swcr_auth swcr_auth;
	struct swcr_encdec swcr_encdec;
	struct swcr_compdec swcr_compdec;
};

static	int32_t swcr_id;

static	void swcr_freesession(device_t dev, crypto_session_t cses);

/* Used for CRYPTO_NULL_CBC. */
static int
swcr_null(struct swcr_session *ses, struct cryptop *crp)
{

	return (0);
}

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	const struct crypto_session_params *csp;
	struct swcr_encdec *sw;
	struct enc_xform *exf;
	int i, j, k, blks, ind, count, ivlen;
	struct uio *uio, uiolcl;
	struct iovec iovlcl[4];
	struct iovec *iov;
	int iovcnt, iovalloc;
	int error;
	bool encrypting;

	error = 0;

	sw = &ses->swcr_encdec;
	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Check for non-padded data */
	if ((crp->crp_payload_length % blks) != 0)
		return (EINVAL);

	if (exf == &enc_xform_aes_icm &&
	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	crypto_read_iv(crp, iv);

	if (crp->crp_cipher_key != NULL) {
		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));

		csp = crypto_get_params(crp->crp_session);
		error = exf->setkey(&sw->sw_kschedule,
		    crp->crp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}

	iov = iovlcl;
	iovcnt = nitems(iovlcl);
	iovalloc = 0;
	uio = &uiolcl;
	switch (crp->crp_buf_type) {
	case CRYPTO_BUF_MBUF:
		error = crypto_mbuftoiov(crp->crp_mbuf, &iov, &iovcnt,
		    &iovalloc);
		if (error)
			return (error);
		uio->uio_iov = iov;
		uio->uio_iovcnt = iovcnt;
		break;
	case CRYPTO_BUF_UIO:
		uio = crp->crp_uio;
		break;
	case CRYPTO_BUF_CONTIG:
		iov[0].iov_base = crp->crp_buf;
		iov[0].iov_len = crp->crp_ilen;
		uio->uio_iov = iov;
		uio->uio_iovcnt = 1;
		break;
	}

	ivp = iv;

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(sw->sw_kschedule, iv);
	}

	count = crp->crp_payload_start;
	ind = cuio_getptr(uio, count, &k);
	if (ind == -1) {
		error = EINVAL;
		goto out;
	}

	i = crp->crp_payload_length;
	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);

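	/*
	 * Walk the buffer one cipher block at a time.  A block that
	 * straddles an iovec boundary is bounced through the local
	 * 'blk' buffer; runs of whole blocks inside a single iovec
	 * are transformed in place.
	 */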
	while (i > 0) {
		/*
		 * If there's insufficient data at the end of
		 * an iovec, we have to do some copying.
		 */
		if (uio->uio_iov[ind].iov_len < k + blks &&
		    uio->uio_iov[ind].iov_len != k) {
			cuio_copydata(uio, count, blks, blk);

			/* Actual encryption/decryption */
			if (exf->reinit) {
				if (encrypting) {
					exf->encrypt(sw->sw_kschedule,
					    blk);
				} else {
					exf->decrypt(sw->sw_kschedule,
					    blk);
				}
			} else if (encrypting) {
				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, blk);

				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				bcopy(blk, iv, blks);
				ivp = iv;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(blk, nivp, blks);

				exf->decrypt(sw->sw_kschedule, blk);

				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				ivp = nivp;
			}

			/* Copy back the processed block */
			cuio_copyback(uio, count, blks, blk);

			count += blks;

			/* Advance pointer */
			ind = cuio_getptr(uio, count, &k);
			if (ind == -1) {
				error = EINVAL;
				goto out;
			}

			i -= blks;

			/* Could be done... */
			if (i == 0)
				break;
		}

		while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) {
			uint8_t *idat;
			size_t nb, rem;

			nb = blks;
			rem = MIN((size_t)i,
			    uio->uio_iov[ind].iov_len - (size_t)k);
			idat = (uint8_t *)uio->uio_iov[ind].iov_base + k;

			if (exf->reinit) {
				if (encrypting && exf->encrypt_multi == NULL)
					exf->encrypt(sw->sw_kschedule,
					    idat);
				else if (encrypting) {
					nb = rounddown(rem, blks);
					exf->encrypt_multi(sw->sw_kschedule,
					    idat, nb);
				} else if (exf->decrypt_multi == NULL)
					exf->decrypt(sw->sw_kschedule,
					    idat);
				else {
					nb = rounddown(rem, blks);
					exf->decrypt_multi(sw->sw_kschedule,
					    idat, nb);
				}
			} else if (encrypting) {
				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, idat);
				ivp = idat;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block to be used
				 * in next block's processing.
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(idat, nivp, blks);

				exf->decrypt(sw->sw_kschedule, idat);

				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				ivp = nivp;
			}

			count += nb;
			k += nb;
			i -= nb;
		}

		/*
		 * Advance to the next iov if the end of the current iov
		 * is aligned with the end of a cipher block.
		 * Note that the code is equivalent to calling:
		 *      ind = cuio_getptr(uio, count, &k);
		 */
		if (i > 0 && k == uio->uio_iov[ind].iov_len) {
			k = 0;
			ind++;
			if (ind >= uio->uio_iovcnt) {
				error = EINVAL;
				goto out;
			}
		}
	}

out:
	if (iovalloc)
		free(iov, M_CRYPTO_DATA);

	return (error);
}

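/*
 * Prepare the session's authentication state for a (new) key.  For
 * HMAC algorithms this precomputes the inner and outer padded
 * contexts; for other keyed algorithms the key is loaded directly.
 */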
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw,
    const uint8_t *key, int klen)
{

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		hmac_init_ipad(axf, key, klen, sw->sw_ictx);
		hmac_init_opad(axf, key, klen, sw->sw_octx);
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/*
		 * We need a scratch buffer large enough to hold an MD5
		 * or SHA1 result, only to discard it.
		 * This performs the initial part of:
		 *   ALGO( key, keyfill, .. )
		 * by adding the key to sw_ictx and using Final() to
		 * obtain the "keyfill" padding.
		 * The key is also saved in sw_octx so that it can be
		 * appended at the end in swcr_authcompute().
		 */
		u_char buf[SHA1_RESULTLEN];

		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		axf->Setkey(sw->sw_ictx, key, klen);
		axf->Init(sw->sw_ictx);
		break;
	default:
		panic("%s: algorithm %d doesn't use keys", __func__, axf->type);
	}
}

/*
 * Compute or verify hash.
 */
static int
swcr_authcompute(struct swcr_session *ses, struct cryptop *crp)
{
	u_char aalg[HASH_MAX_LEN];
	u_char uaalg[HASH_MAX_LEN];
	const struct crypto_session_params *csp;
	struct swcr_auth *sw;
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	sw = &ses->swcr_auth;

	axf = sw->sw_axf;

	if (crp->crp_auth_key != NULL) {
		csp = crypto_get_params(crp->crp_session);
		swcr_authprepare(axf, sw, crp->crp_auth_key,
		    csp->csp_auth_klen);
	}

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
	    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
	if (err)
		return (err);

	err = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length,
	    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
	if (err)
		return (err);

	switch (axf->type) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return (EINVAL);

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		/* If we have no key saved, return error. */
		if (sw->sw_octx == NULL)
			return (EINVAL);

		/*
		 * Add the trailing copy of the key (see comment in
		 * swcr_authprepare()) after the data:
		 *   ALGO( .., key, algofill )
		 * and let Final() do the proper, natural "algofill"
		 * padding.
		 */
		axf->Update(&ctx, sw->sw_octx, sw->sw_octx_len);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_POLY1305:
		axf->Final(aalg, &ctx);
		break;
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
		if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
	}
	return (0);
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */
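/*
 * The assertions above guarantee that request lengths expressed as
 * 'int' can never exceed the NIST GCM limits on plaintext and AAD
 * sizes.
 */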

static int
swcr_gmac(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	uint32_t *blkp;
	int blksz, i, ivlen, len;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	crypto_read_iv(crp, iv);

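	/*
	 * GMAC authenticates but does not encrypt: feed the payload
	 * to the MAC in block-sized chunks, zero-padding the final
	 * partial block.
	 */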
	axf->Reinit(&ctx, iv, ivlen);
	for (i = 0; i < crp->crp_payload_length; i += blksz) {
		len = MIN(crp->crp_payload_length - i, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	/* Length block: big-endian bit count of the authenticated data */
	bzero(blk, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);
		if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
	}
	return (0);
}

static int
swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	uint32_t *blkp;
	int blksz, i, ivlen, len, r;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crp->crp_aad_length; i += blksz) {
		len = MIN(crp->crp_aad_length - i, blksz);
		crypto_copydata(crp, crp->crp_aad_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption with MAC */
	for (i = 0; i < crp->crp_payload_length; i += len) {
		len = MIN(crp->crp_payload_length - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		} else {
			axf->Update(&ctx, blk, len);
		}
	}

	/* Length block: big-endian bit counts of the AAD and the ciphertext */
	bzero(blk, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_aad_length * 8);
	blkp = (uint32_t *)blk + 3;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);

		r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
		if (r != 0)
			return (EBADMSG);

		/* tag matches, decrypt data */
		for (i = 0; i < crp->crp_payload_length; i += blksz) {
			len = MIN(crp->crp_payload_length - i, blksz);
			if (len < blksz)
				bzero(blk, blksz);
			crypto_copydata(crp, crp->crp_payload_start + i, len,
			    blk);
			exf->decrypt(swe->sw_kschedule, blk);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    aalg);
	}

	return (0);
}

static int
swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	int blksz, i, ivlen, len;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	crypto_read_iv(crp, iv);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
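	/*
	 * For a digest-only session the entire payload is treated as
	 * auth data; there is no encrypted payload.
	 */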
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = 0;

	axf->Reinit(&ctx, iv, ivlen);
	for (i = 0; i < crp->crp_payload_length; i += blksz) {
		len = MIN(crp->crp_payload_length - i, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);
		if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
	}
	return (0);
}

static int
swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	int blksz, i, ivlen, len, r;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crp->crp_aad_length; i += blksz) {
		len = MIN(crp->crp_aad_length - i, blksz);
		crypto_copydata(crp, crp->crp_aad_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crp->crp_payload_length; i += len) {
		len = MIN(crp->crp_payload_length - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			axf->Update(&ctx, blk, len);
			exf->encrypt(swe->sw_kschedule, blk);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		} else {
			/*
			 * One of the problems with CCM+CBC is that
			 * the authentication is done on the
			 * unencrypted data.  As a result, we have to
			 * decrypt the data twice: once to generate
			 * the tag and a second time after the tag is
			 * verified.
			 */
			exf->decrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
		}
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);

		r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
		if (r != 0)
			return (EBADMSG);

		/* tag matches, decrypt data */
		exf->reinit(swe->sw_kschedule, iv);
		for (i = 0; i < crp->crp_payload_length; i += blksz) {
			len = MIN(crp->crp_payload_length - i, blksz);
			if (len < blksz)
				bzero(blk, blksz);
			crypto_copydata(crp, crp->crp_payload_start + i, len,
			    blk);
			exf->decrypt(swe->sw_kschedule, blk);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    aalg);
	}

	return (0);
}

/*
 * Apply a cipher and a digest to perform EtA.
 */
static int
swcr_eta(struct swcr_session *ses, struct cryptop *crp)
{
	int error;

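	/*
	 * Encrypt-then-authenticate: on encryption run the cipher
	 * first and MAC the ciphertext; on decryption verify the
	 * digest first and decrypt only if it matches.
	 */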
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		error = swcr_encdec(ses, crp);
		if (error == 0)
			error = swcr_authcompute(ses, crp);
	} else {
		error = swcr_authcompute(ses, crp);
		if (error == 0)
			error = swcr_encdec(ses, crp);
	}
	return (error);
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct swcr_session *ses, struct cryptop *crp)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = ses->swcr_compdec.sw_cxf;

	/*
	 * We must handle the whole buffer of data in one go, so if
	 * the data is not contiguous in the request buffer, copy it
	 * into a local buffer first.
	 */

	data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
	    data);

	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
		result = cxf->compress(data, crp->crp_payload_length, &out);
	else
		result = cxf->decompress(data, crp->crp_payload_length, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return (EINVAL);
	crp->crp_olen = result;

	/* Check the compressed size when doing compression */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
		if (result >= crp->crp_payload_length) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return (0);
		}
	}

	/*
	 * Copy back the (de)compressed data.  For mbufs, m_copyback
	 * extends the mbuf chain as necessary.
	 */
	crypto_copyback(crp, crp->crp_payload_start, result, out);
	if (result < crp->crp_payload_length) {
		switch (crp->crp_buf_type) {
		case CRYPTO_BUF_MBUF:
			adj = result - crp->crp_payload_length;
			m_adj(crp->crp_mbuf, adj);
			break;
		case CRYPTO_BUF_UIO: {
			struct uio *uio = crp->crp_uio;
			int ind;

			adj = crp->crp_payload_length - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
			}
			break;
		}
	}
	free(out, M_CRYPTO_DATA);
	return (0);
}

static int
swcr_setup_encdec(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	struct enc_xform *txf;
	int error;

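	/*
	 * The cipher key may be absent at session setup; in that
	 * case it is supplied with each request and scheduled in
	 * swcr_encdec().
	 */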
	swe = &ses->swcr_encdec;
	txf = crypto_cipher(csp);
	MPASS(txf->ivsize == csp->csp_ivlen);
	if (csp->csp_cipher_key != NULL) {
		error = txf->setkey(&swe->sw_kschedule,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;
	return (0);
}

static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	swa = &ses->swcr_auth;

	axf = crypto_auth_hash(csp);
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);

	switch (csp->csp_auth_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		swa->sw_octx_len = axf->ctxsize;
		swa->sw_octx = malloc(swa->sw_octx_len, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_octx == NULL)
			return (ENOBUFS);

		if (csp->csp_auth_key != NULL) {
			swcr_authprepare(axf, swa, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}

		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		swa->sw_octx_len = csp->csp_auth_klen;
		swa->sw_octx = malloc(swa->sw_octx_len, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_octx == NULL)
			return (ENOBUFS);

		/* Store the key so we can "append" it to the payload */
		if (csp->csp_auth_key != NULL) {
			swcr_authprepare(axf, swa, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}

		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_NIST_GMAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_gmac;
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		/*
		 * Blake2b and Blake2s support an optional key but do
		 * not require one.
		 */
		if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL)
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_ccm_cbc_mac;
		break;
	}

	return (0);
}

static int
swcr_setup_gcm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct enc_xform *txf;
	struct auth_hash *axf;
	int error;

	if (csp->csp_ivlen != AES_GCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_nist_gmac_aes_128;
		break;
	case 192:
		axf = &auth_hash_nist_gmac_aes_192;
		break;
	case 256:
		axf = &auth_hash_nist_gmac_aes_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	swe = &ses->swcr_encdec;
	txf = &enc_xform_aes_nist_gcm;
	if (csp->csp_cipher_key != NULL) {
		error = txf->setkey(&swe->sw_kschedule,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;

	return (0);
}

static int
swcr_setup_ccm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct enc_xform *txf;
	struct auth_hash *axf;
	int error;

	if (csp->csp_ivlen != AES_CCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_ccm_cbc_mac_128;
		break;
	case 192:
		axf = &auth_hash_ccm_cbc_mac_192;
		break;
	case 256:
		axf = &auth_hash_ccm_cbc_mac_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	swe = &ses->swcr_encdec;
	txf = &enc_xform_ccm;
	if (csp->csp_cipher_key != NULL) {
		error = txf->setkey(&swe->sw_kschedule,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;

	return (0);
}

static bool
swcr_auth_supported(const struct crypto_session_params *csp)
{
	struct auth_hash *axf;

	axf = crypto_auth_hash(csp);
	if (axf == NULL)
		return (false);
	switch (csp->csp_auth_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		break;
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_GCM_IV_LEN)
			return (false);
		break;
	case CRYPTO_POLY1305:
		if (csp->csp_auth_klen != POLY1305_KEY_LEN)
			return (false);
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_CCM_IV_LEN)
			return (false);
		break;
	}
	return (true);
}

static bool
swcr_cipher_supported(const struct crypto_session_params *csp)
{
	struct enc_xform *txf;

	txf = crypto_cipher(csp);
	if (txf == NULL)
		return (false);
	if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
	    txf->ivsize != csp->csp_ivlen)
		return (false);
	return (true);
}

static int
swcr_probesession(device_t dev, const struct crypto_session_params *csp)
{

	if (csp->csp_flags != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		default:
			if (!swcr_cipher_supported(csp))
				return (EINVAL);
			break;
		}
		break;
	case CSP_MODE_DIGEST:
		if (!swcr_auth_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		/* AEAD algorithms cannot be used for EtA. */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			return (EINVAL);
		}

		if (!swcr_cipher_supported(csp) ||
		    !swcr_auth_supported(csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

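	/*
	 * Report software priority so that hardware drivers, which
	 * return a better probe value, are preferred when present.
	 */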
	return (CRYPTODEV_PROBE_SOFTWARE);
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct swcr_session *ses;
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct comp_algo *cxf;
	int error;

	ses = crypto_get_driver_session(cses);
	mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);

	error = 0;
	swe = &ses->swcr_encdec;
	swa = &ses->swcr_auth;
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			break;
#ifdef INVARIANTS
		default:
			panic("bad compression algo");
#endif
		}
		ses->swcr_compdec.sw_cxf = cxf;
		ses->swcr_process = swcr_compdec;
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_NULL_CBC:
			ses->swcr_process = swcr_null;
			break;
#ifdef INVARIANTS
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad cipher algo");
#endif
		default:
			error = swcr_setup_encdec(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_encdec;
		}
		break;
	case CSP_MODE_DIGEST:
		error = swcr_setup_auth(ses, csp);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			error = swcr_setup_gcm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_gcm;
			break;
		case CRYPTO_AES_CCM_16:
			error = swcr_setup_ccm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_ccm;
			break;
#ifdef INVARIANTS
		default:
			panic("bad aead algo");
#endif
		}
		break;
	case CSP_MODE_ETA:
#ifdef INVARIANTS
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad eta cipher algo");
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			panic("bad eta auth algo");
		}
#endif

		error = swcr_setup_auth(ses, csp);
		if (error)
			break;
		if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
			/* Effectively degrade to digest mode. */
			ses->swcr_process = swcr_authcompute;
			break;
		}

		error = swcr_setup_encdec(ses, csp);
		if (error == 0)
			ses->swcr_process = swcr_eta;
		break;
	default:
		error = EINVAL;
	}

	if (error)
		swcr_freesession(dev, cses);
	return (error);
}

static void
swcr_freesession(device_t dev, crypto_session_t cses)
{
	struct swcr_session *ses;
	struct swcr_auth *swa;
	struct enc_xform *txf;
	struct auth_hash *axf;

	ses = crypto_get_driver_session(cses);

	mtx_destroy(&ses->swcr_lock);

	txf = ses->swcr_encdec.sw_exf;
	if (txf != NULL) {
		if (ses->swcr_encdec.sw_kschedule != NULL)
			txf->zerokey(&(ses->swcr_encdec.sw_kschedule));
	}

	axf = ses->swcr_auth.sw_axf;
	if (axf != NULL) {
		swa = &ses->swcr_auth;
		if (swa->sw_ictx != NULL) {
			explicit_bzero(swa->sw_ictx, axf->ctxsize);
			free(swa->sw_ictx, M_CRYPTO_DATA);
		}
		if (swa->sw_octx != NULL) {
			explicit_bzero(swa->sw_octx, swa->sw_octx_len);
			free(swa->sw_octx, M_CRYPTO_DATA);
		}
	}
}

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(crp->crp_session);
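	/*
	 * Serialize requests on the session: the cipher and auth
	 * contexts stored in the session are modified in place.
	 */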
	mtx_lock(&ses->swcr_lock);

	crp->crp_etype = ses->swcr_process(ses, crp);

	mtx_unlock(&ses->swcr_lock);
	crypto_done(crp);
	return (0);
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{

	swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return (ENXIO);
	}

	return (0);
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	return (0);
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_probesession, swcr_probesession),
	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);