/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>
#include <sys/mutex.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>
#include <sys/md5.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

struct swcr_auth {
	void		*sw_ictx;
	void		*sw_octx;
	struct auth_hash *sw_axf;
	uint16_t	sw_mlen;
	uint16_t	sw_octx_len;
};

struct swcr_encdec {
	uint8_t		*sw_kschedule;
	struct enc_xform *sw_exf;
};

struct swcr_compdec {
	struct comp_algo *sw_cxf;
};

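/*
 * Per-session state.  swcr_process points at the handler implementing
 * the session's mode (cipher, digest, AEAD, EtA, or compression); the
 * component structures above carry whatever state that handler needs.
 */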
struct swcr_session {
	struct mtx	swcr_lock;
	int	(*swcr_process)(struct swcr_session *, struct cryptop *);

	struct swcr_auth swcr_auth;
	struct swcr_encdec swcr_encdec;
	struct swcr_compdec swcr_compdec;
};

static	int32_t swcr_id;

static	void swcr_freesession(device_t dev, crypto_session_t cses);

/* Used for CRYPTO_NULL_CBC. */
static int
swcr_null(struct swcr_session *ses, struct cryptop *crp)
{

	return (0);
}

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	const struct crypto_session_params *csp;
	struct swcr_encdec *sw;
	struct enc_xform *exf;
	int i, j, k, blks, ind, count, ivlen;
	struct uio *uio, uiolcl;
	struct iovec iovlcl[4];
	struct iovec *iov;
	int iovcnt, iovalloc;
	int error;
	bool encrypting;

	error = 0;

	sw = &ses->swcr_encdec;
	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* The payload must be a multiple of the cipher block size. */
	if ((crp->crp_payload_length % blks) != 0)
		return (EINVAL);

	if (exf == &enc_xform_aes_icm &&
	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	crypto_read_iv(crp, iv);

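	/*
	 * A non-NULL crp_cipher_key overrides the session key for this
	 * request, so discard the old key schedule and derive a new one.
	 */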
	if (crp->crp_cipher_key != NULL) {
		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));

		csp = crypto_get_params(crp->crp_session);
		error = exf->setkey(&sw->sw_kschedule,
		    crp->crp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}

	iov = iovlcl;
	iovcnt = nitems(iovlcl);
	iovalloc = 0;
	uio = &uiolcl;
	switch (crp->crp_buf_type) {
	case CRYPTO_BUF_MBUF:
		error = crypto_mbuftoiov(crp->crp_mbuf, &iov, &iovcnt,
		    &iovalloc);
		if (error)
			return (error);
		uio->uio_iov = iov;
		uio->uio_iovcnt = iovcnt;
		break;
	case CRYPTO_BUF_UIO:
		uio = crp->crp_uio;
		break;
	case CRYPTO_BUF_CONTIG:
		iov[0].iov_base = crp->crp_buf;
		iov[0].iov_len = crp->crp_ilen;
		uio->uio_iov = iov;
		uio->uio_iovcnt = 1;
		break;
	}

	ivp = iv;
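
	/*
	 * For CBC-style decryption below, ivp alternates between iv and
	 * iv2 so that a copy of each ciphertext block survives its
	 * in-place decryption and can serve as the next block's IV.
	 */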

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(sw->sw_kschedule, iv);
	}

	count = crp->crp_payload_start;
	ind = cuio_getptr(uio, count, &k);
	if (ind == -1) {
		error = EINVAL;
		goto out;
	}

	i = crp->crp_payload_length;
	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);

	while (i > 0) {
		/*
		 * If there's insufficient data at the end of
		 * an iovec, we have to do some copying.
		 */
		if (uio->uio_iov[ind].iov_len < k + blks &&
		    uio->uio_iov[ind].iov_len != k) {
			cuio_copydata(uio, count, blks, blk);

			/* Actual encryption/decryption */
			if (exf->reinit) {
				if (encrypting) {
					exf->encrypt(sw->sw_kschedule,
					    blk);
				} else {
					exf->decrypt(sw->sw_kschedule,
					    blk);
				}
			} else if (encrypting) {
				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, blk);

				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				bcopy(blk, iv, blks);
				ivp = iv;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(blk, nivp, blks);

				exf->decrypt(sw->sw_kschedule, blk);

				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				ivp = nivp;
			}

			/* Copy back the processed block */
			cuio_copyback(uio, count, blks, blk);

			count += blks;

			/* Advance pointer */
			ind = cuio_getptr(uio, count, &k);
			if (ind == -1) {
				error = EINVAL;
				goto out;
			}

			i -= blks;

			/* Could be done... */
			if (i == 0)
				break;
		}

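		/*
		 * Fast path: operate on blocks in place for as long as
		 * they are contiguous within the current iovec.
		 */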
		while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) {
			uint8_t *idat;
			size_t nb, rem;

			nb = blks;
			rem = MIN((size_t)i,
			    uio->uio_iov[ind].iov_len - (size_t)k);
			idat = (uint8_t *)uio->uio_iov[ind].iov_base + k;

			if (exf->reinit) {
				if (encrypting && exf->encrypt_multi == NULL)
					exf->encrypt(sw->sw_kschedule,
					    idat);
				else if (encrypting) {
					nb = rounddown(rem, blks);
					exf->encrypt_multi(sw->sw_kschedule,
					    idat, nb);
				} else if (exf->decrypt_multi == NULL)
					exf->decrypt(sw->sw_kschedule,
					    idat);
				else {
					nb = rounddown(rem, blks);
					exf->decrypt_multi(sw->sw_kschedule,
					    idat, nb);
				}
			} else if (encrypting) {
				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, idat);
				ivp = idat;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block to be used
				 * in next block's processing.
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(idat, nivp, blks);

				exf->decrypt(sw->sw_kschedule, idat);

				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				ivp = nivp;
			}

			count += nb;
			k += nb;
			i -= nb;
		}

		/*
		 * Advance to the next iov if the end of the current iov
		 * is aligned with the end of a cipher block.
		 * Note that the code is equivalent to calling:
		 *      ind = cuio_getptr(uio, count, &k);
		 */
		if (i > 0 && k == uio->uio_iov[ind].iov_len) {
			k = 0;
			ind++;
			if (ind >= uio->uio_iovcnt) {
				error = EINVAL;
				goto out;
			}
		}
	}

out:
	if (iovalloc)
		free(iov, M_CRYPTO_DATA);

	return (error);
}

static void
swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw,
    const uint8_t *key, int klen)
{

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		hmac_init_ipad(axf, key, klen, sw->sw_ictx);
		hmac_init_opad(axf, key, klen, sw->sw_octx);
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/*
		 * We need a buffer that can hold an MD5 or a SHA1 result
		 * just to throw it away.
		 * What we do here is the initial part of:
		 *   ALGO( key, keyfill, .. )
		 * adding the key to sw_ictx and abusing Final() to get the
		 * "keyfill" padding.
		 * In addition we save the key in sw_octx so that it can
		 * be appended at the end in swcr_authcompute().
		 */
		u_char buf[SHA1_RESULTLEN];

		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		axf->Setkey(sw->sw_ictx, key, klen);
		axf->Init(sw->sw_ictx);
		break;
	default:
		panic("%s: algorithm %d doesn't use keys", __func__, axf->type);
	}
}

/*
 * Compute or verify hash.
 */
static int
swcr_authcompute(struct swcr_session *ses, struct cryptop *crp)
{
	u_char aalg[HASH_MAX_LEN];
	u_char uaalg[HASH_MAX_LEN];
	const struct crypto_session_params *csp;
	struct swcr_auth *sw;
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	sw = &ses->swcr_auth;

	axf = sw->sw_axf;

	if (crp->crp_auth_key != NULL) {
		csp = crypto_get_params(crp->crp_session);
		swcr_authprepare(axf, sw, crp->crp_auth_key,
		    csp->csp_auth_klen);
	}

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
	    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
	if (err)
		return (err);

	err = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length,
	    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
	if (err)
		return (err);

	switch (axf->type) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return (EINVAL);

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		/* If we have no key saved, return error. */
		if (sw->sw_octx == NULL)
			return (EINVAL);

		/*
		 * Add the trailing copy of the key (see comment in
		 * swcr_authprepare()) after the data:
		 *   ALGO( .., key, algofill )
		 * and let Final() do the proper, natural "algofill"
		 * padding.
		 */
		axf->Update(&ctx, sw->sw_octx, sw->sw_octx_len);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_POLY1305:
		axf->Final(aalg, &ctx);
		break;
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
		if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
	}
	return (0);
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */

static int
swcr_gmac(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	uint32_t *blkp;
	int blksz, i, ivlen, len;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	crypto_read_iv(crp, iv);

	axf->Reinit(&ctx, iv, ivlen);
	for (i = 0; i < crp->crp_payload_length; i += blksz) {
		len = MIN(crp->crp_payload_length - i, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	/* length block */
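	/*
	 * GHASH finishes with a block holding the bit lengths of the
	 * AAD and ciphertext as two 64-bit big-endian values.  GMAC has
	 * no ciphertext, and only the low 32 bits of the data length
	 * are stored; the CTASSERTs above show that this suffices.
	 */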
	bzero(blk, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);
		if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
	}
	return (0);
}

static int
swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	uint32_t *blkp;
	int blksz, i, ivlen, len, r;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crp->crp_aad_length; i += blksz) {
		len = MIN(crp->crp_aad_length - i, blksz);
		crypto_copydata(crp, crp->crp_aad_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption with MAC */
	for (i = 0; i < crp->crp_payload_length; i += len) {
		len = MIN(crp->crp_payload_length - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		} else {
			axf->Update(&ctx, blk, len);
		}
	}

	/* length block */
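	/*
	 * GHASH finishes with a block holding the bit lengths of the
	 * AAD and ciphertext as two 64-bit big-endian values; only the
	 * low 32 bits of each are stored, which the CTASSERTs above
	 * show is sufficient.
	 */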
	bzero(blk, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_aad_length * 8);
	blkp = (uint32_t *)blk + 3;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);

		r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
		if (r != 0)
			return (EBADMSG);

		/* tag matches, decrypt data */
		for (i = 0; i < crp->crp_payload_length; i += blksz) {
			len = MIN(crp->crp_payload_length - i, blksz);
			if (len < blksz)
				bzero(blk, blksz);
			crypto_copydata(crp, crp->crp_payload_start + i, len,
			    blk);
			exf->decrypt(swe->sw_kschedule, blk);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    aalg);
	}

	return (0);
}

639 
640 static int
641 swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
642 {
643 	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
644 	u_char *blk = (u_char *)blkbuf;
645 	u_char aalg[AALG_MAX_RESULT_LEN];
646 	u_char uaalg[AALG_MAX_RESULT_LEN];
647 	u_char iv[EALG_MAX_BLOCK_LEN];
648 	union authctx ctx;
649 	struct swcr_auth *swa;
650 	struct auth_hash *axf;
651 	int blksz, i, ivlen, len;
652 
653 	swa = &ses->swcr_auth;
654 	axf = swa->sw_axf;
655 
656 	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
657 	blksz = axf->blocksize;
658 
659 	/* Initialize the IV */
660 	ivlen = AES_CCM_IV_LEN;
661 	crypto_read_iv(crp, iv);
662 
663 	/*
664 	 * AES CCM-CBC-MAC needs to know the length of both the auth
665 	 * data and payload data before doing the auth computation.
666 	 */
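	/*
	 * In digest-only mode the payload is authenticated rather than
	 * encrypted, so it counts as auth data and cryptDataLength is 0.
	 */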
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = 0;

	axf->Reinit(&ctx, iv, ivlen);
	for (i = 0; i < crp->crp_payload_length; i += blksz) {
		len = MIN(crp->crp_payload_length - i, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);
		if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
	}
	return (0);
}

692 
693 static int
694 swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
695 {
696 	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
697 	u_char *blk = (u_char *)blkbuf;
698 	u_char aalg[AALG_MAX_RESULT_LEN];
699 	u_char uaalg[AALG_MAX_RESULT_LEN];
700 	u_char iv[EALG_MAX_BLOCK_LEN];
701 	union authctx ctx;
702 	struct swcr_auth *swa;
703 	struct swcr_encdec *swe;
704 	struct auth_hash *axf;
705 	struct enc_xform *exf;
706 	int blksz, i, ivlen, len, r;
707 
708 	swa = &ses->swcr_auth;
709 	axf = swa->sw_axf;
710 
711 	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
712 	blksz = axf->blocksize;
713 
714 	swe = &ses->swcr_encdec;
715 	exf = swe->sw_exf;
716 
717 	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
718 		return (EINVAL);
719 
720 	/* Initialize the IV */
721 	ivlen = AES_CCM_IV_LEN;
722 	bcopy(crp->crp_iv, iv, ivlen);
723 
724 	/*
725 	 * AES CCM-CBC-MAC needs to know the length of both the auth
726 	 * data and payload data before doing the auth computation.
727 	 */
728 	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
729 	ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;
730 
731 	/* Supply MAC with IV */
732 	axf->Reinit(&ctx, iv, ivlen);
733 
734 	/* Supply MAC with AAD */
735 	for (i = 0; i < crp->crp_aad_length; i += blksz) {
736 		len = MIN(crp->crp_aad_length - i, blksz);
737 		crypto_copydata(crp, crp->crp_aad_start + i, len, blk);
738 		bzero(blk + len, blksz - len);
739 		axf->Update(&ctx, blk, blksz);
740 	}
741 
742 	exf->reinit(swe->sw_kschedule, iv);
743 
744 	/* Do encryption/decryption with MAC */
745 	for (i = 0; i < crp->crp_payload_length; i += len) {
746 		len = MIN(crp->crp_payload_length - i, blksz);
747 		if (len < blksz)
748 			bzero(blk, blksz);
749 		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
750 		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
751 			axf->Update(&ctx, blk, len);
752 			exf->encrypt(swe->sw_kschedule, blk);
753 			crypto_copyback(crp, crp->crp_payload_start + i, len,
754 			    blk);
755 		} else {
			/*
			 * One of the problems with CCM+CBC is that
			 * the authentication is done on the
			 * unencrypted data.  As a result, we have to
			 * decrypt the data twice: once to generate
			 * the tag and a second time after the tag is
			 * verified.
			 */
			exf->decrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
		}
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);

		r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
		if (r != 0)
			return (EBADMSG);

		/* tag matches, decrypt data */
		exf->reinit(swe->sw_kschedule, iv);
		for (i = 0; i < crp->crp_payload_length; i += blksz) {
			len = MIN(crp->crp_payload_length - i, blksz);
			if (len < blksz)
				bzero(blk, blksz);
			crypto_copydata(crp, crp->crp_payload_start + i, len,
			    blk);
			exf->decrypt(swe->sw_kschedule, blk);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    aalg);
	}

	return (0);
}

/*
 * Apply a cipher and a digest to perform encrypt-then-authenticate
 * (EtA): encrypt first and authenticate the ciphertext; when
 * decrypting, verify the digest before decrypting.
 */
static int
swcr_eta(struct swcr_session *ses, struct cryptop *crp)
{
	int error;

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		error = swcr_encdec(ses, crp);
		if (error == 0)
			error = swcr_authcompute(ses, crp);
	} else {
		error = swcr_authcompute(ses, crp);
		if (error == 0)
			error = swcr_encdec(ses, crp);
	}
	return (error);
}

/*
 * Apply a compression/decompression algorithm.
 */
static int
swcr_compdec(struct swcr_session *ses, struct cryptop *crp)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = ses->swcr_compdec.sw_cxf;

	/*
	 * We must handle the whole buffer of data in one pass, so if
	 * the data is not contiguous in the mbuf, copy it into a
	 * temporary buffer first.
	 */
	data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
	    data);

	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
		result = cxf->compress(data, crp->crp_payload_length, &out);
	else
		result = cxf->decompress(data, crp->crp_payload_length, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return (EINVAL);
	crp->crp_olen = result;

	/* Check the compressed size when doing compression */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
		if (result >= crp->crp_payload_length) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return (0);
		}
	}

	/*
	 * Copy back the (de)compressed data.  m_copyback() extends the
	 * mbuf as necessary.
	 */
	crypto_copyback(crp, crp->crp_payload_start, result, out);
	if (result < crp->crp_payload_length) {
		switch (crp->crp_buf_type) {
		case CRYPTO_BUF_MBUF:
			adj = result - crp->crp_payload_length;
			m_adj(crp->crp_mbuf, adj);
			break;
		case CRYPTO_BUF_UIO: {
			struct uio *uio = crp->crp_uio;
			int ind;

			adj = crp->crp_payload_length - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
			}
			break;
		}
	}
	free(out, M_CRYPTO_DATA);
	return (0);
}

static int
swcr_setup_encdec(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	struct enc_xform *txf;
	int error;

	swe = &ses->swcr_encdec;
	txf = crypto_cipher(csp);
	MPASS(txf->ivsize == csp->csp_ivlen);
	if (csp->csp_cipher_key != NULL) {
		error = txf->setkey(&swe->sw_kschedule,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;
	return (0);
}

static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	swa = &ses->swcr_auth;

	axf = crypto_auth_hash(csp);
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);

	switch (csp->csp_auth_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		swa->sw_octx_len = axf->ctxsize;
		swa->sw_octx = malloc(swa->sw_octx_len, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_octx == NULL)
			return (ENOBUFS);

		if (csp->csp_auth_key != NULL) {
			swcr_authprepare(axf, swa, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}

		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		swa->sw_octx_len = csp->csp_auth_klen;
		swa->sw_octx = malloc(swa->sw_octx_len, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_octx == NULL)
			return (ENOBUFS);

		/* Store the key so we can "append" it to the payload */
		if (csp->csp_auth_key != NULL) {
			swcr_authprepare(axf, swa, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}

		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
#ifdef notdef
	case CRYPTO_MD5:
#endif
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_NIST_GMAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_gmac;
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		/*
		 * Blake2b and Blake2s support an optional key but do
		 * not require one.
		 */
		if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL)
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_ccm_cbc_mac;
		break;
	}

	return (0);
}

static int
swcr_setup_gcm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct enc_xform *txf;
	struct auth_hash *axf;
	int error;

	if (csp->csp_ivlen != AES_GCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_nist_gmac_aes_128;
		break;
	case 192:
		axf = &auth_hash_nist_gmac_aes_192;
		break;
	case 256:
		axf = &auth_hash_nist_gmac_aes_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	swe = &ses->swcr_encdec;
	txf = &enc_xform_aes_nist_gcm;
	if (csp->csp_cipher_key != NULL) {
		error = txf->setkey(&swe->sw_kschedule,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;

	return (0);
}

static int
swcr_setup_ccm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct enc_xform *txf;
	struct auth_hash *axf;
	int error;

	if (csp->csp_ivlen != AES_CCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_ccm_cbc_mac_128;
		break;
	case 192:
		axf = &auth_hash_ccm_cbc_mac_192;
		break;
	case 256:
		axf = &auth_hash_ccm_cbc_mac_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	swe = &ses->swcr_encdec;
	txf = &enc_xform_ccm;
	if (csp->csp_cipher_key != NULL) {
		error = txf->setkey(&swe->sw_kschedule,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;

	return (0);
}

static bool
swcr_auth_supported(const struct crypto_session_params *csp)
{
	struct auth_hash *axf;

	axf = crypto_auth_hash(csp);
	if (axf == NULL)
		return (false);
	switch (csp->csp_auth_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		break;
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_GCM_IV_LEN)
			return (false);
		break;
	case CRYPTO_POLY1305:
		if (csp->csp_auth_klen != POLY1305_KEY_LEN)
			return (false);
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_CCM_IV_LEN)
			return (false);
		break;
	}
	return (true);
}

static bool
swcr_cipher_supported(const struct crypto_session_params *csp)
{
	struct enc_xform *txf;

	txf = crypto_cipher(csp);
	if (txf == NULL)
		return (false);
	if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
	    txf->ivsize != csp->csp_ivlen)
		return (false);
	return (true);
}

static int
swcr_probesession(device_t dev, const struct crypto_session_params *csp)
{

	if (csp->csp_flags != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		default:
			if (!swcr_cipher_supported(csp))
				return (EINVAL);
			break;
		}
		break;
	case CSP_MODE_DIGEST:
		if (!swcr_auth_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		/* AEAD algorithms cannot be used for EtA. */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			return (EINVAL);
		}

		if (!swcr_cipher_supported(csp) ||
		    !swcr_auth_supported(csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_SOFTWARE);
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct swcr_session *ses;
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct comp_algo *cxf;
	int error;

	ses = crypto_get_driver_session(cses);
	mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);

	error = 0;
	swe = &ses->swcr_encdec;
	swa = &ses->swcr_auth;
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			break;
#ifdef INVARIANTS
		default:
			panic("bad compression algo");
#endif
		}
		ses->swcr_compdec.sw_cxf = cxf;
		ses->swcr_process = swcr_compdec;
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_NULL_CBC:
			ses->swcr_process = swcr_null;
			break;
#ifdef INVARIANTS
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad cipher algo");
#endif
		default:
			error = swcr_setup_encdec(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_encdec;
		}
		break;
	case CSP_MODE_DIGEST:
		error = swcr_setup_auth(ses, csp);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			error = swcr_setup_gcm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_gcm;
			break;
		case CRYPTO_AES_CCM_16:
			error = swcr_setup_ccm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_ccm;
			break;
#ifdef INVARIANTS
		default:
			panic("bad aead algo");
#endif
		}
		break;
	case CSP_MODE_ETA:
#ifdef INVARIANTS
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad eta cipher algo");
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			panic("bad eta auth algo");
		}
#endif

		error = swcr_setup_auth(ses, csp);
		if (error)
			break;
		if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
			/* Effectively degrade to digest mode. */
			ses->swcr_process = swcr_authcompute;
			break;
		}

		error = swcr_setup_encdec(ses, csp);
		if (error == 0)
			ses->swcr_process = swcr_eta;
		break;
	default:
		error = EINVAL;
	}

	if (error)
		swcr_freesession(dev, cses);
	return (error);
}

static void
swcr_freesession(device_t dev, crypto_session_t cses)
{
	struct swcr_session *ses;
	struct swcr_auth *swa;
	struct enc_xform *txf;
	struct auth_hash *axf;

	ses = crypto_get_driver_session(cses);

	mtx_destroy(&ses->swcr_lock);

	txf = ses->swcr_encdec.sw_exf;
	if (txf != NULL) {
		if (ses->swcr_encdec.sw_kschedule != NULL)
			txf->zerokey(&(ses->swcr_encdec.sw_kschedule));
	}

	axf = ses->swcr_auth.sw_axf;
	if (axf != NULL) {
		swa = &ses->swcr_auth;
		if (swa->sw_ictx != NULL) {
			explicit_bzero(swa->sw_ictx, axf->ctxsize);
			free(swa->sw_ictx, M_CRYPTO_DATA);
		}
		if (swa->sw_octx != NULL) {
			explicit_bzero(swa->sw_octx, swa->sw_octx_len);
			free(swa->sw_octx, M_CRYPTO_DATA);
		}
	}
}

/*
 * Process a software request.  Requests for a given session are
 * serialized by the session lock.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(crp->crp_session);
	mtx_lock(&ses->swcr_lock);

	crp->crp_etype = ses->swcr_process(ses, crp);

	mtx_unlock(&ses->swcr_lock);
	crypto_done(crp);
	return (0);
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{

	swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return (ENXIO);
	}

	return (0);
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	return (0);
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_probesession, swcr_probesession),
	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
1501