/* xref: /freebsd/sys/opencrypto/cryptosoft.c (revision d65cd7a57bf0600b722afc770838a5d0c1c3a8e1) */
/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>
#include <sys/mutex.h>

#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

struct swcr_auth {
	void		*sw_ictx;
	void		*sw_octx;
	struct auth_hash *sw_axf;
	uint16_t	sw_mlen;
};

struct swcr_encdec {
	void		*sw_kschedule;
	struct enc_xform *sw_exf;
};

struct swcr_compdec {
	struct comp_algo *sw_cxf;
};

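/*
 * Per-session state.  swcr_process points at the handler chosen in
 * swcr_newsession() for the session's mode (cipher, digest, AEAD,
 * EtA or compression); swcr_process() below invokes it under
 * swcr_lock.
 */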
struct swcr_session {
	struct mtx	swcr_lock;
	int	(*swcr_process)(struct swcr_session *, struct cryptop *);

	struct swcr_auth swcr_auth;
	struct swcr_encdec swcr_encdec;
	struct swcr_compdec swcr_compdec;
};

static	int32_t swcr_id;

static	void swcr_freesession(device_t dev, crypto_session_t cses);

/* Used for CRYPTO_NULL_CBC. */
static int
swcr_null(struct swcr_session *ses, struct cryptop *crp)
{

	return (0);
}

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	const struct crypto_session_params *csp;
	struct swcr_encdec *sw;
	struct enc_xform *exf;
	int i, j, k, blks, ind, count, ivlen;
	struct uio *uio, uiolcl;
	struct iovec iovlcl[4];
	struct iovec *iov;
	int iovcnt, iovalloc;
	int error;
	bool encrypting;

	error = 0;

	sw = &ses->swcr_encdec;
	exf = sw->sw_exf;
	ivlen = exf->ivsize;

	if (exf->native_blocksize == 0) {
		/* Check for non-padded data */
		if ((crp->crp_payload_length % exf->blocksize) != 0)
			return (EINVAL);

		blks = exf->blocksize;
	} else
		blks = exf->native_blocksize;

	if (exf == &enc_xform_aes_icm &&
	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	crypto_read_iv(crp, iv);

	if (crp->crp_cipher_key != NULL) {
		csp = crypto_get_params(crp->crp_session);
		error = exf->setkey(sw->sw_kschedule,
		    crp->crp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}

	iov = iovlcl;
	iovcnt = nitems(iovlcl);
	iovalloc = 0;
	uio = &uiolcl;
	switch (crp->crp_buf_type) {
	case CRYPTO_BUF_MBUF:
		error = crypto_mbuftoiov(crp->crp_mbuf, &iov, &iovcnt,
		    &iovalloc);
		if (error)
			return (error);
		uio->uio_iov = iov;
		uio->uio_iovcnt = iovcnt;
		break;
	case CRYPTO_BUF_UIO:
		uio = crp->crp_uio;
		break;
	case CRYPTO_BUF_CONTIG:
		iov[0].iov_base = crp->crp_buf;
		iov[0].iov_len = crp->crp_ilen;
		uio->uio_iov = iov;
		uio->uio_iovcnt = 1;
		break;
	}

	ivp = iv;

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(sw->sw_kschedule, iv);
	}

	count = crp->crp_payload_start;
	ind = cuio_getptr(uio, count, &k);
	if (ind == -1) {
		error = EINVAL;
		goto out;
	}

	i = crp->crp_payload_length;
	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);

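	/*
	 * CBC-style chaining, done here when the xform has no reinit
	 * hook: on encryption each plaintext block is XORed with the
	 * previous ciphertext block (initially the IV) before being
	 * encrypted; on decryption the ciphertext block is saved
	 * first, decrypted, then XORed with the previous ciphertext.
	 * iv and iv2 alternate as the saved-block buffer on decrypt.
	 */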
	while (i >= blks) {
		/*
		 * If there's insufficient data at the end of
		 * an iovec, we have to do some copying.
		 */
		if (uio->uio_iov[ind].iov_len < k + blks &&
		    uio->uio_iov[ind].iov_len != k) {
			cuio_copydata(uio, count, blks, blk);

			/* Actual encryption/decryption */
			if (exf->reinit) {
				if (encrypting) {
					exf->encrypt(sw->sw_kschedule, blk,
					    blk);
				} else {
					exf->decrypt(sw->sw_kschedule, blk,
					    blk);
				}
			} else if (encrypting) {
				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, blk, blk);

				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				bcopy(blk, iv, blks);
				ivp = iv;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(blk, nivp, blks);

				exf->decrypt(sw->sw_kschedule, blk, blk);

				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				ivp = nivp;
			}

			/* Copy back decrypted block */
			cuio_copyback(uio, count, blks, blk);

			count += blks;

			/* Advance pointer */
			ind = cuio_getptr(uio, count, &k);
			if (ind == -1) {
				error = EINVAL;
				goto out;
			}

			i -= blks;

			/* Could be done... */
			if (i == 0)
				break;
		}

		while (uio->uio_iov[ind].iov_len >= k + blks && i >= blks) {
			uint8_t *idat;

			idat = (uint8_t *)uio->uio_iov[ind].iov_base + k;

			if (exf->reinit) {
				if (encrypting)
					exf->encrypt(sw->sw_kschedule,
					    idat, idat);
				else
					exf->decrypt(sw->sw_kschedule,
					    idat, idat);
			} else if (encrypting) {
				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, idat, idat);
				ivp = idat;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block to be used
				 * in next block's processing.
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(idat, nivp, blks);

				exf->decrypt(sw->sw_kschedule, idat, idat);

				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				ivp = nivp;
			}

			count += blks;
			k += blks;
			i -= blks;
		}

		/*
		 * Advance to the next iov if the end of the current iov
		 * is aligned with the end of a cipher block.
		 * Note that the code is equivalent to calling:
		 *      ind = cuio_getptr(uio, count, &k);
		 */
		if (i > 0 && k == uio->uio_iov[ind].iov_len) {
			k = 0;
			ind++;
			if (ind >= uio->uio_iovcnt) {
				error = EINVAL;
				goto out;
			}
		}
	}

	/* Handle trailing partial block for stream ciphers. */
	if (i > 0) {
		KASSERT(exf->native_blocksize != 0,
		    ("%s: partial block of %d bytes for cipher %s",
		    __func__, i, exf->name));
		KASSERT(exf->reinit != NULL,
		    ("%s: partial block cipher %s without reinit hook",
		    __func__, exf->name));
		KASSERT(i < blks, ("%s: partial block too big", __func__));

		cuio_copydata(uio, count, i, blk);
		if (encrypting) {
			exf->encrypt_last(sw->sw_kschedule, blk, blk, i);
		} else {
			exf->decrypt_last(sw->sw_kschedule, blk, blk, i);
		}
		cuio_copyback(uio, count, i, blk);
	}

out:
	if (iovalloc)
		free(iov, M_CRYPTO_DATA);

	return (error);
}

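/*
 * (Re-)derive key-dependent authentication state: precomputed HMAC
 * ipad/opad contexts for the HMAC algorithms, or a direct Setkey()
 * and Init() for keyed hashes such as Poly1305 and BLAKE2.
 */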
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw,
    const uint8_t *key, int klen)
{

	switch (axf->type) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		hmac_init_ipad(axf, key, klen, sw->sw_ictx);
		hmac_init_opad(axf, key, klen, sw->sw_octx);
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		axf->Setkey(sw->sw_ictx, key, klen);
		axf->Init(sw->sw_ictx);
		break;
	default:
		panic("%s: algorithm %d doesn't use keys", __func__, axf->type);
	}
}

/*
 * Compute or verify hash.
 */
static int
swcr_authcompute(struct swcr_session *ses, struct cryptop *crp)
{
	u_char aalg[HASH_MAX_LEN];
	u_char uaalg[HASH_MAX_LEN];
	const struct crypto_session_params *csp;
	struct swcr_auth *sw;
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	sw = &ses->swcr_auth;

	axf = sw->sw_axf;

	if (crp->crp_auth_key != NULL) {
		csp = crypto_get_params(crp->crp_session);
		swcr_authprepare(axf, sw, crp->crp_auth_key,
		    csp->csp_auth_klen);
	}

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
	    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
	if (err)
		return err;

	err = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length,
	    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
	if (err)
		return err;

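	/*
	 * Finalize.  Plain hashes emit the digest directly; the HMAC
	 * variants feed the inner digest through the precomputed opad
	 * context, i.e. HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
	 */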
	switch (axf->type) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_POLY1305:
		axf->Final(aalg, &ctx);
		break;
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
		if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
	}
	return (0);
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */

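/*
 * Compute or verify an AES-GMAC tag: GHASH over the request payload
 * treated as AAD, with no encryption (digest-only GCM).
 */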
static int
swcr_gmac(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	uint32_t *blkp;
	int blksz, i, ivlen, len;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	crypto_read_iv(crp, iv);

	axf->Reinit(&ctx, iv, ivlen);
	for (i = 0; i < crp->crp_payload_length; i += blksz) {
		len = MIN(crp->crp_payload_length - i, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	/* length block */
	bzero(blk, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);
		if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
	}
	return (0);
}

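/*
 * AES-GCM.  GHASH covers the AAD and the ciphertext, so encryption
 * MACs each block after encrypting it, while decryption MACs first
 * and defers the actual decryption until the tag has been verified.
 */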
static int
swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	uint32_t *blkp;
	int blksz, i, ivlen, len, r;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	KASSERT(axf->blocksize == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crp->crp_aad_length; i += blksz) {
		len = MIN(crp->crp_aad_length - i, blksz);
		crypto_copydata(crp, crp->crp_aad_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption with MAC */
	for (i = 0; i < crp->crp_payload_length; i += len) {
		len = MIN(crp->crp_payload_length - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt(swe->sw_kschedule, blk, blk);
			axf->Update(&ctx, blk, len);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		} else {
			axf->Update(&ctx, blk, len);
		}
	}

	/*
	 * Length block: the 64-bit big-endian bit lengths of the AAD
	 * and the ciphertext.  Only the low 32 bits of each are
	 * written (words 1 and 3); the CTASSERTs above guarantee the
	 * high words are zero.
	 */
	bzero(blk, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_aad_length * 8);
	blkp = (uint32_t *)blk + 3;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);

		r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
		if (r != 0)
			return (EBADMSG);

		/* tag matches, decrypt data */
		for (i = 0; i < crp->crp_payload_length; i += blksz) {
			len = MIN(crp->crp_payload_length - i, blksz);
			if (len < blksz)
				bzero(blk, blksz);
			crypto_copydata(crp, crp->crp_payload_start + i, len,
			    blk);
			exf->decrypt(swe->sw_kschedule, blk, blk);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    aalg);
	}

	return (0);
}

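/*
 * Compute or verify an AES-CCM CBC-MAC tag over the request payload
 * (digest-only CCM, no encryption).
 */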
static int
swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	int blksz, i, ivlen, len;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	crypto_read_iv(crp, iv);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = 0;

	axf->Reinit(&ctx, iv, ivlen);
	for (i = 0; i < crp->crp_payload_length; i += blksz) {
		len = MIN(crp->crp_payload_length - i, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);
		if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
	}
	return (0);
}

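/*
 * AES-CCM: CBC-MAC combined with CTR-mode encryption.  The MAC is
 * computed over the plaintext, so decryption has to run twice: once
 * to generate the tag and again after the tag verifies (see the
 * comment in the loop below).
 */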
static int
swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	int blksz, i, ivlen, len, r;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	KASSERT(axf->blocksize == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crp->crp_aad_length; i += blksz) {
		len = MIN(crp->crp_aad_length - i, blksz);
		crypto_copydata(crp, crp->crp_aad_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crp->crp_payload_length; i += len) {
		len = MIN(crp->crp_payload_length - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			axf->Update(&ctx, blk, len);
			exf->encrypt(swe->sw_kschedule, blk, blk);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		} else {
			/*
			 * One of the problems with CCM+CBC is that
			 * the authentication is done on the
			 * unencrypted data.  As a result, we have to
			 * decrypt the data twice: once to generate
			 * the tag and a second time after the tag is
			 * verified.
			 */
			exf->decrypt(swe->sw_kschedule, blk, blk);
			axf->Update(&ctx, blk, len);
		}
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);

		r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
		if (r != 0)
			return (EBADMSG);

		/* tag matches, decrypt data */
		exf->reinit(swe->sw_kschedule, iv);
		for (i = 0; i < crp->crp_payload_length; i += blksz) {
			len = MIN(crp->crp_payload_length - i, blksz);
			if (len < blksz)
				bzero(blk, blksz);
			crypto_copydata(crp, crp->crp_payload_start + i, len,
			    blk);
			exf->decrypt(swe->sw_kschedule, blk, blk);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    aalg);
	}

	return (0);
}

/*
 * Apply a cipher and a digest to perform EtA (encrypt-then-
 * authenticate): encrypt first and authenticate the ciphertext, so
 * on decryption the digest is verified before decrypting.
 */
static int
swcr_eta(struct swcr_session *ses, struct cryptop *crp)
{
	int error;

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		error = swcr_encdec(ses, crp);
		if (error == 0)
			error = swcr_authcompute(ses, crp);
	} else {
		error = swcr_authcompute(ses, crp);
		if (error == 0)
			error = swcr_encdec(ses, crp);
	}
	return (error);
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct swcr_session *ses, struct cryptop *crp)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = ses->swcr_compdec.sw_cxf;

	/*
	 * We must process the whole buffer of data in one pass, so if
	 * the data is not contiguous in the mbuf, we copy it into a
	 * temporary buffer first.
	 */

	data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
	    data);

	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
		result = cxf->compress(data, crp->crp_payload_length, &out);
	else
		result = cxf->decompress(data, crp->crp_payload_length, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return (EINVAL);
	crp->crp_olen = result;

	/* Check the compressed size when doing compression */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
		if (result >= crp->crp_payload_length) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return (0);
		}
	}

	/*
	 * Copy back the (de)compressed data.  m_copyback extends the
	 * mbuf as necessary.
	 */
	crypto_copyback(crp, crp->crp_payload_start, result, out);
	if (result < crp->crp_payload_length) {
		switch (crp->crp_buf_type) {
		case CRYPTO_BUF_MBUF:
			adj = result - crp->crp_payload_length;
			m_adj(crp->crp_mbuf, adj);
			break;
		case CRYPTO_BUF_UIO: {
			struct uio *uio = crp->crp_uio;
			int ind;

			adj = crp->crp_payload_length - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
			}
			break;
		}
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}

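/*
 * Allocate the key schedule for the session cipher and, when a key
 * was supplied at session-creation time, expand it now; otherwise a
 * per-request key is expected in crp_cipher_key.
 */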
static int
swcr_setup_cipher(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	struct enc_xform *txf;
	int error;

	swe = &ses->swcr_encdec;
	txf = crypto_cipher(csp);
	MPASS(txf->ivsize == csp->csp_ivlen);
	if (txf->ctxsize != 0) {
		swe->sw_kschedule = malloc(txf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swe->sw_kschedule == NULL)
			return (ENOMEM);
	}
	if (csp->csp_cipher_key != NULL) {
		error = txf->setkey(swe->sw_kschedule,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;
	return (0);
}

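/*
 * Set up the auth side of a session: pick the transform, validate
 * the requested tag length against the hash size, and initialize the
 * (possibly keyed) context.
 */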
static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	swa = &ses->swcr_auth;

	axf = crypto_auth_hash(csp);
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_octx == NULL)
			return (ENOBUFS);

		if (csp->csp_auth_key != NULL) {
			swcr_authprepare(axf, swa, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}

		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_NIST_GMAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_gmac;
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		/*
		 * Blake2b and Blake2s support an optional key but do
		 * not require one.
		 */
		if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL)
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_ccm_cbc_mac;
		break;
	}

	return (0);
}

static int
swcr_setup_gcm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	if (csp->csp_ivlen != AES_GCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_nist_gmac_aes_128;
		break;
	case 192:
		axf = &auth_hash_nist_gmac_aes_192;
		break;
	case 256:
		axf = &auth_hash_nist_gmac_aes_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	return (swcr_setup_cipher(ses, csp));
}

static int
swcr_setup_ccm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	if (csp->csp_ivlen != AES_CCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_ccm_cbc_mac_128;
		break;
	case 192:
		axf = &auth_hash_ccm_cbc_mac_192;
		break;
	case 256:
		axf = &auth_hash_ccm_cbc_mac_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	return (swcr_setup_cipher(ses, csp));
}

static bool
swcr_auth_supported(const struct crypto_session_params *csp)
{
	struct auth_hash *axf;

	axf = crypto_auth_hash(csp);
	if (axf == NULL)
		return (false);
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		break;
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_GCM_IV_LEN)
			return (false);
		break;
	case CRYPTO_POLY1305:
		if (csp->csp_auth_klen != POLY1305_KEY_LEN)
			return (false);
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_CCM_IV_LEN)
			return (false);
		break;
	}
	return (true);
}

static bool
swcr_cipher_supported(const struct crypto_session_params *csp)
{
	struct enc_xform *txf;

	txf = crypto_cipher(csp);
	if (txf == NULL)
		return (false);
	if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
	    txf->ivsize != csp->csp_ivlen)
		return (false);
	return (true);
}

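/*
 * Decide whether this driver can handle a proposed session.
 * CRYPTODEV_PROBE_SOFTWARE is the least preferred probe value, so
 * the framework selects cryptosoft only when no hardware (or
 * accelerated software) driver bids for the session.
 */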
static int
swcr_probesession(device_t dev, const struct crypto_session_params *csp)
{

	if (csp->csp_flags != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		default:
			if (!swcr_cipher_supported(csp))
				return (EINVAL);
			break;
		}
		break;
	case CSP_MODE_DIGEST:
		if (!swcr_auth_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		/* AEAD algorithms cannot be used for EtA. */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			return (EINVAL);
		}

		if (!swcr_cipher_supported(csp) ||
		    !swcr_auth_supported(csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_SOFTWARE);
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct swcr_session *ses;
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct comp_algo *cxf;
	int error;

	ses = crypto_get_driver_session(cses);
	mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);

	error = 0;
	swe = &ses->swcr_encdec;
	swa = &ses->swcr_auth;
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			break;
#ifdef INVARIANTS
		default:
			panic("bad compression algo");
#endif
		}
		ses->swcr_compdec.sw_cxf = cxf;
		ses->swcr_process = swcr_compdec;
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_NULL_CBC:
			ses->swcr_process = swcr_null;
			break;
#ifdef INVARIANTS
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad cipher algo");
#endif
		default:
			error = swcr_setup_cipher(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_encdec;
		}
		break;
	case CSP_MODE_DIGEST:
		error = swcr_setup_auth(ses, csp);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			error = swcr_setup_gcm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_gcm;
			break;
		case CRYPTO_AES_CCM_16:
			error = swcr_setup_ccm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_ccm;
			break;
#ifdef INVARIANTS
		default:
			panic("bad aead algo");
#endif
		}
		break;
	case CSP_MODE_ETA:
#ifdef INVARIANTS
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad eta cipher algo");
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			panic("bad eta auth algo");
		}
#endif

		error = swcr_setup_auth(ses, csp);
		if (error)
			break;
		if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
			/* Effectively degrade to digest mode. */
			ses->swcr_process = swcr_authcompute;
			break;
		}

		error = swcr_setup_cipher(ses, csp);
		if (error == 0)
			ses->swcr_process = swcr_eta;
		break;
	default:
		error = EINVAL;
	}

	if (error)
		swcr_freesession(dev, cses);
	return (error);
}

static void
swcr_freesession(device_t dev, crypto_session_t cses)
{
	struct swcr_session *ses;
	struct swcr_auth *swa;
	struct auth_hash *axf;

	ses = crypto_get_driver_session(cses);

	mtx_destroy(&ses->swcr_lock);

	zfree(ses->swcr_encdec.sw_kschedule, M_CRYPTO_DATA);

	axf = ses->swcr_auth.sw_axf;
	if (axf != NULL) {
		swa = &ses->swcr_auth;
		if (swa->sw_ictx != NULL) {
			explicit_bzero(swa->sw_ictx, axf->ctxsize);
			free(swa->sw_ictx, M_CRYPTO_DATA);
		}
		if (swa->sw_octx != NULL) {
			explicit_bzero(swa->sw_octx, axf->ctxsize);
			free(swa->sw_octx, M_CRYPTO_DATA);
		}
	}
}

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(crp->crp_session);
	mtx_lock(&ses->swcr_lock);

	crp->crp_etype = ses->swcr_process(ses, crp);

	mtx_unlock(&ses->swcr_lock);
	crypto_done(crp);
	return (0);
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{

	swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return (ENXIO);
	}

	return (0);
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	return 0;
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_probesession, swcr_probesession),
	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession,swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
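
/*
 * Usage sketch (illustrative, not part of this file): consumers reach
 * this driver through the opencrypto framework rather than calling it
 * directly, roughly as follows.
 *
 *	struct crypto_session_params csp = {
 *		.csp_mode = CSP_MODE_CIPHER,
 *		.csp_cipher_alg = CRYPTO_AES_CBC,
 *		.csp_cipher_klen = 32,		// AES-256
 *		.csp_ivlen = 16,
 *	};
 *	crypto_session_t cses;
 *	int error;
 *
 *	error = crypto_newsession(&cses, &csp, CRYPTOCAP_F_SOFTWARE);
 *
 * The framework then calls swcr_probesession() and swcr_newsession()
 * through the cryptodev_if method table above, and each subsequent
 * crypto_dispatch() of a request lands in swcr_process().
 */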