xref: /freebsd/sys/opencrypto/cryptosoft.c (revision 0e00c709d7f1cdaeb584d244df9534bcdd0ac527)
1 /*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/
2 
3 /*-
4  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
5  * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
6  *
7  * This code was written by Angelos D. Keromytis in Athens, Greece, in
8  * February 2000. Network Security Technologies Inc. (NSTI) kindly
9  * supported the development of this code.
10  *
11  * Copyright (c) 2000, 2001 Angelos D. Keromytis
12  * Copyright (c) 2014 The FreeBSD Foundation
13  * All rights reserved.
14  *
15  * Portions of this software were developed by John-Mark Gurney
16  * under sponsorship of the FreeBSD Foundation and
17  * Rubicon Communications, LLC (Netgate).
18  *
19  * Permission to use, copy, and modify this software with or without fee
20  * is hereby granted, provided that this entire notice is included in
21  * all source code copies of any software which is or includes a copy or
22  * modification of this software.
23  *
24  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
25  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
26  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
27  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
28  * PURPOSE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/module.h>
39 #include <sys/sysctl.h>
40 #include <sys/errno.h>
41 #include <sys/random.h>
42 #include <sys/kernel.h>
43 #include <sys/uio.h>
44 #include <sys/lock.h>
45 #include <sys/rwlock.h>
46 #include <sys/endian.h>
47 #include <sys/limits.h>
48 #include <sys/mutex.h>
49 
50 #include <crypto/sha1.h>
51 #include <opencrypto/rmd160.h>
52 #include <sys/md5.h>
53 
54 #include <opencrypto/cryptodev.h>
55 #include <opencrypto/xform.h>
56 
57 #include <sys/kobj.h>
58 #include <sys/bus.h>
59 #include "cryptodev_if.h"
60 
/*
 * Per-session state for an authentication (hash/MAC) transform.
 */
struct swcr_auth {
	void		*sw_ictx;	/* inner (or sole) hash context template */
	void		*sw_octx;	/* outer HMAC context template, if any */
	struct auth_hash *sw_axf;	/* auth transform descriptor */
	uint16_t	sw_mlen;	/* digest (tag) length in bytes */
};
67 
/* Per-session state for an encryption/decryption transform. */
struct swcr_encdec {
	uint8_t		*sw_kschedule;	/* expanded key schedule */
	struct enc_xform *sw_exf;	/* cipher transform descriptor */
};
72 
/* Per-session state for a compression/decompression transform. */
struct swcr_compdec {
	struct comp_algo *sw_cxf;	/* compression transform descriptor */
};
76 
/*
 * Software crypto session.  swcr_process points at the handler selected
 * for the session's mode at setup time (e.g. swcr_encdec, swcr_authcompute,
 * swcr_gcm, swcr_eta, ...).
 */
struct swcr_session {
	struct mtx	swcr_lock;	/* serializes request processing */
	int	(*swcr_process)(struct swcr_session *, struct cryptop *);

	struct swcr_auth swcr_auth;
	struct swcr_encdec swcr_encdec;
	struct swcr_compdec swcr_compdec;
};
85 
/* Driver id registered with the crypto framework (assignment site not
 * visible in this chunk — presumably set at attach time). */
static	int32_t swcr_id;

/* Forward declaration: session teardown is defined later in the file. */
static	void swcr_freesession(device_t dev, crypto_session_t cses);
89 
/*
 * No-op handler used for CRYPTO_NULL_CBC sessions: the request succeeds
 * without touching any data.
 */
static int
swcr_null(struct swcr_session *ses, struct cryptop *crp)
{
	(void)ses;
	(void)crp;

	return (0);
}
97 
/*
 * Apply a symmetric encryption/decryption algorithm.
 *
 * The request's data (mbuf chain, uio, or contiguous buffer) is
 * normalized to a struct uio and walked one cipher block at a time.
 * CBC-style IV chaining is performed here; transforms that provide a
 * reinit method handle all IV state themselves.
 *
 * Returns 0 on success, EINVAL for unaligned/malformed requests, or a
 * key-setup error.
 */
static int
swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	const struct crypto_session_params *csp;
	struct swcr_encdec *sw;
	struct enc_xform *exf;
	int i, j, k, blks, ind, count, ivlen;
	struct uio *uio, uiolcl;
	struct iovec iovlcl[4];
	struct iovec *iov;
	int iovcnt, iovalloc;
	int error;
	bool encrypting;

	error = 0;

	sw = &ses->swcr_encdec;
	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Check for non-padded data */
	if ((crp->crp_payload_length % blks) != 0)
		return EINVAL;

	/* AES-ICM (CTR) requires its IV out-of-band with the request. */
	if (exf == &enc_xform_aes_icm &&
	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	crypto_read_iv(crp, iv);

	/* Re-key if this request carries its own cipher key. */
	if (crp->crp_cipher_key != NULL) {
		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));

		csp = crypto_get_params(crp->crp_session);
		error = exf->setkey(&sw->sw_kschedule,
		    crp->crp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}

	/* Normalize the data buffer into a uio for uniform traversal. */
	iov = iovlcl;
	iovcnt = nitems(iovlcl);
	iovalloc = 0;
	uio = &uiolcl;
	switch (crp->crp_buf_type) {
	case CRYPTO_BUF_MBUF:
		error = crypto_mbuftoiov(crp->crp_mbuf, &iov, &iovcnt,
		    &iovalloc);
		if (error)
			return (error);
		uio->uio_iov = iov;
		uio->uio_iovcnt = iovcnt;
		break;
	case CRYPTO_BUF_UIO:
		uio = crp->crp_uio;
		break;
	case CRYPTO_BUF_CONTIG:
		iov[0].iov_base = crp->crp_buf;
		iov[0].iov_len = crp->crp_ilen;
		uio->uio_iov = iov;
		uio->uio_iovcnt = 1;
		break;
	}

	ivp = iv;

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(sw->sw_kschedule, iv);
	}

	/* Locate the iovec and offset where the payload starts. */
	count = crp->crp_payload_start;
	ind = cuio_getptr(uio, count, &k);
	if (ind == -1) {
		error = EINVAL;
		goto out;
	}

	i = crp->crp_payload_length;
	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);

	while (i > 0) {
		/*
		 * If there's insufficient data at the end of
		 * an iovec, we have to do some copying.
		 */
		if (uio->uio_iov[ind].iov_len < k + blks &&
		    uio->uio_iov[ind].iov_len != k) {
			/* Gather one block that straddles iovecs. */
			cuio_copydata(uio, count, blks, blk);

			/* Actual encryption/decryption */
			if (exf->reinit) {
				if (encrypting) {
					exf->encrypt(sw->sw_kschedule,
					    blk);
				} else {
					exf->decrypt(sw->sw_kschedule,
					    blk);
				}
			} else if (encrypting) {
				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, blk);

				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				bcopy(blk, iv, blks);
				ivp = iv;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(blk, nivp, blks);

				exf->decrypt(sw->sw_kschedule, blk);

				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				ivp = nivp;
			}

			/* Copy back decrypted block */
			cuio_copyback(uio, count, blks, blk);

			count += blks;

			/* Advance pointer */
			ind = cuio_getptr(uio, count, &k);
			if (ind == -1) {
				error = EINVAL;
				goto out;
			}

			i -= blks;

			/* Could be done... */
			if (i == 0)
				break;
		}

		/*
		 * Fast path: process blocks in place while the current
		 * iovec holds at least one whole cipher block.
		 */
		while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) {
			uint8_t *idat;
			size_t nb, rem;

			nb = blks;
			rem = MIN((size_t)i,
			    uio->uio_iov[ind].iov_len - (size_t)k);
			idat = (uint8_t *)uio->uio_iov[ind].iov_base + k;

			if (exf->reinit) {
				/*
				 * Multi-block entry points, when present,
				 * consume as many whole blocks as fit in
				 * this iovec in one call.
				 */
				if (encrypting && exf->encrypt_multi == NULL)
					exf->encrypt(sw->sw_kschedule,
					    idat);
				else if (encrypting) {
					nb = rounddown(rem, blks);
					exf->encrypt_multi(sw->sw_kschedule,
					    idat, nb);
				} else if (exf->decrypt_multi == NULL)
					exf->decrypt(sw->sw_kschedule,
					    idat);
				else {
					nb = rounddown(rem, blks);
					exf->decrypt_multi(sw->sw_kschedule,
					    idat, nb);
				}
			} else if (encrypting) {
				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, idat);
				ivp = idat;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block to be used
				 * in next block's processing.
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(idat, nivp, blks);

				exf->decrypt(sw->sw_kschedule, idat);

				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				ivp = nivp;
			}

			count += nb;
			k += nb;
			i -= nb;
		}

		/*
		 * Advance to the next iov if the end of the current iov
		 * is aligned with the end of a cipher block.
		 * Note that the code is equivalent to calling:
		 *      ind = cuio_getptr(uio, count, &k);
		 */
		if (i > 0 && k == uio->uio_iov[ind].iov_len) {
			k = 0;
			ind++;
			if (ind >= uio->uio_iovcnt) {
				error = EINVAL;
				goto out;
			}
		}
	}

out:
	/* Free the iovec array if crypto_mbuftoiov() had to allocate one. */
	if (iovalloc)
		free(iov, M_CRYPTO_DATA);

	return (error);
}
332 
333 static void
334 swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw,
335     const uint8_t *key, int klen)
336 {
337 
338 	switch (axf->type) {
339 	case CRYPTO_MD5_HMAC:
340 	case CRYPTO_SHA1_HMAC:
341 	case CRYPTO_SHA2_224_HMAC:
342 	case CRYPTO_SHA2_256_HMAC:
343 	case CRYPTO_SHA2_384_HMAC:
344 	case CRYPTO_SHA2_512_HMAC:
345 	case CRYPTO_NULL_HMAC:
346 	case CRYPTO_RIPEMD160_HMAC:
347 		hmac_init_ipad(axf, key, klen, sw->sw_ictx);
348 		hmac_init_opad(axf, key, klen, sw->sw_octx);
349 		break;
350 	case CRYPTO_POLY1305:
351 	case CRYPTO_BLAKE2B:
352 	case CRYPTO_BLAKE2S:
353 		axf->Setkey(sw->sw_ictx, key, klen);
354 		axf->Init(sw->sw_ictx);
355 		break;
356 	default:
357 		panic("%s: algorithm %d doesn't use keys", __func__, axf->type);
358 	}
359 }
360 
/*
 * Compute or verify hash.
 *
 * Runs the session's auth transform over the AAD and payload regions.
 * If CRYPTO_OP_VERIFY_DIGEST is set the computed digest is compared
 * (in constant time) against the one in the request and EBADMSG is
 * returned on mismatch; otherwise the digest is written back into the
 * request at crp_digest_start.
 */
static int
swcr_authcompute(struct swcr_session *ses, struct cryptop *crp)
{
	u_char aalg[HASH_MAX_LEN];
	u_char uaalg[HASH_MAX_LEN];
	const struct crypto_session_params *csp;
	struct swcr_auth *sw;
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	sw = &ses->swcr_auth;

	axf = sw->sw_axf;

	/* Re-key if this request carries its own auth key. */
	if (crp->crp_auth_key != NULL) {
		csp = crypto_get_params(crp->crp_session);
		swcr_authprepare(axf, sw, crp->crp_auth_key,
		    csp->csp_auth_klen);
	}

	/* Start from the precomputed (possibly keyed/ipad) context. */
	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
	    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
	if (err)
		return err;

	err = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length,
	    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
	if (err)
		return err;

	switch (axf->type) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		/* Plain hash: single finalization. */
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		/* HMAC outer pass: hash the inner digest with the opad
		 * context prepared at key-setup time. */
		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_POLY1305:
		/* Keyed transforms with no outer pass. */
		axf->Final(aalg, &ctx);
		break;
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
		if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
	}
	return (0);
}
440 
/* Compile-time guards: int-typed request lengths fit GCM's size limits. */
CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */
443 
444 static int
445 swcr_gmac(struct swcr_session *ses, struct cryptop *crp)
446 {
447 	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
448 	u_char *blk = (u_char *)blkbuf;
449 	u_char aalg[AALG_MAX_RESULT_LEN];
450 	u_char uaalg[AALG_MAX_RESULT_LEN];
451 	u_char iv[EALG_MAX_BLOCK_LEN];
452 	union authctx ctx;
453 	struct swcr_auth *swa;
454 	struct auth_hash *axf;
455 	uint32_t *blkp;
456 	int blksz, i, ivlen, len;
457 
458 	swa = &ses->swcr_auth;
459 	axf = swa->sw_axf;
460 
461 	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
462 	blksz = axf->blocksize;
463 
464 	/* Initialize the IV */
465 	ivlen = AES_GCM_IV_LEN;
466 	crypto_read_iv(crp, iv);
467 
468 	axf->Reinit(&ctx, iv, ivlen);
469 	for (i = 0; i < crp->crp_payload_length; i += blksz) {
470 		len = MIN(crp->crp_payload_length - i, blksz);
471 		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
472 		bzero(blk + len, blksz - len);
473 		axf->Update(&ctx, blk, blksz);
474 	}
475 
476 	/* length block */
477 	bzero(blk, blksz);
478 	blkp = (uint32_t *)blk + 1;
479 	*blkp = htobe32(crp->crp_payload_length * 8);
480 	axf->Update(&ctx, blk, blksz);
481 
482 	/* Finalize MAC */
483 	axf->Final(aalg, &ctx);
484 
485 	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
486 		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
487 		    uaalg);
488 		if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
489 			return (EBADMSG);
490 	} else {
491 		/* Inject the authentication data */
492 		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
493 	}
494 	return (0);
495 }
496 
497 static int
498 swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
499 {
500 	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
501 	u_char *blk = (u_char *)blkbuf;
502 	u_char aalg[AALG_MAX_RESULT_LEN];
503 	u_char uaalg[AALG_MAX_RESULT_LEN];
504 	u_char iv[EALG_MAX_BLOCK_LEN];
505 	union authctx ctx;
506 	struct swcr_auth *swa;
507 	struct swcr_encdec *swe;
508 	struct auth_hash *axf;
509 	struct enc_xform *exf;
510 	uint32_t *blkp;
511 	int blksz, i, ivlen, len, r;
512 
513 	swa = &ses->swcr_auth;
514 	axf = swa->sw_axf;
515 
516 	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
517 	blksz = axf->blocksize;
518 
519 	swe = &ses->swcr_encdec;
520 	exf = swe->sw_exf;
521 
522 	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
523 		return (EINVAL);
524 
525 	/* Initialize the IV */
526 	ivlen = AES_GCM_IV_LEN;
527 	bcopy(crp->crp_iv, iv, ivlen);
528 
529 	/* Supply MAC with IV */
530 	axf->Reinit(&ctx, iv, ivlen);
531 
532 	/* Supply MAC with AAD */
533 	for (i = 0; i < crp->crp_aad_length; i += blksz) {
534 		len = MIN(crp->crp_aad_length - i, blksz);
535 		crypto_copydata(crp, crp->crp_aad_start + i, len, blk);
536 		bzero(blk + len, blksz - len);
537 		axf->Update(&ctx, blk, blksz);
538 	}
539 
540 	exf->reinit(swe->sw_kschedule, iv);
541 
542 	/* Do encryption with MAC */
543 	for (i = 0; i < crp->crp_payload_length; i += len) {
544 		len = MIN(crp->crp_payload_length - i, blksz);
545 		if (len < blksz)
546 			bzero(blk, blksz);
547 		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
548 		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
549 			exf->encrypt(swe->sw_kschedule, blk);
550 			axf->Update(&ctx, blk, len);
551 			crypto_copyback(crp, crp->crp_payload_start + i, len,
552 			    blk);
553 		} else {
554 			axf->Update(&ctx, blk, len);
555 		}
556 	}
557 
558 	/* length block */
559 	bzero(blk, blksz);
560 	blkp = (uint32_t *)blk + 1;
561 	*blkp = htobe32(crp->crp_aad_length * 8);
562 	blkp = (uint32_t *)blk + 3;
563 	*blkp = htobe32(crp->crp_payload_length * 8);
564 	axf->Update(&ctx, blk, blksz);
565 
566 	/* Finalize MAC */
567 	axf->Final(aalg, &ctx);
568 
569 	/* Validate tag */
570 	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
571 		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
572 		    uaalg);
573 
574 		r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
575 		if (r != 0)
576 			return (EBADMSG);
577 
578 		/* tag matches, decrypt data */
579 		for (i = 0; i < crp->crp_payload_length; i += blksz) {
580 			len = MIN(crp->crp_payload_length - i, blksz);
581 			if (len < blksz)
582 				bzero(blk, blksz);
583 			crypto_copydata(crp, crp->crp_payload_start + i, len,
584 			    blk);
585 			exf->decrypt(swe->sw_kschedule, blk);
586 			crypto_copyback(crp, crp->crp_payload_start + i, len,
587 			    blk);
588 		}
589 	} else {
590 		/* Inject the authentication data */
591 		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
592 		    aalg);
593 	}
594 
595 	return (0);
596 }
597 
/*
 * AES-CCM CBC-MAC digest (no encryption): authenticate the payload.
 *
 * Returns 0 on success or EBADMSG when digest verification fails.
 */
static int
swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	int blksz, i, ivlen, len;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	crypto_read_iv(crp, iv);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	/* In digest-only mode the payload is the associated data and
	 * nothing is encrypted (cryptDataLength = 0). */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = 0;

	axf->Reinit(&ctx, iv, ivlen);
	/* Feed the payload in zero-padded full blocks. */
	for (i = 0; i < crp->crp_payload_length; i += blksz) {
		len = MIN(crp->crp_payload_length - i, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		/* Constant-time compare against the expected tag. */
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);
		if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
	}
	return (0);
}
650 
/*
 * AES-CCM AEAD: encrypt (or verify-then-decrypt) the payload while
 * computing the CBC-MAC tag over the AAD and plaintext.
 *
 * The IV must be passed separately from the data buffer
 * (CRYPTO_F_IV_SEPARATE).  Returns 0 on success, EINVAL for a
 * malformed request, or EBADMSG when tag verification fails.
 */
static int
swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	int blksz, i, ivlen, len, r;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crp->crp_aad_length; i += blksz) {
		len = MIN(crp->crp_aad_length - i, blksz);
		crypto_copydata(crp, crp->crp_aad_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crp->crp_payload_length; i += len) {
		len = MIN(crp->crp_payload_length - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			/* CCM authenticates the plaintext: MAC first. */
			axf->Update(&ctx, blk, len);
			exf->encrypt(swe->sw_kschedule, blk);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		} else {
			/*
			 * One of the problems with CCM+CBC is that
			 * the authentication is done on the
			 * unecncrypted data.  As a result, we have to
			 * decrypt the data twice: once to generate
			 * the tag and a second time after the tag is
			 * verified.
			 */
			exf->decrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
		}
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);

		r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
		if (r != 0)
			return (EBADMSG);

		/* tag matches, decrypt data */
		/* Second pass: restart the keystream, then decrypt. */
		exf->reinit(swe->sw_kschedule, iv);
		for (i = 0; i < crp->crp_payload_length; i += blksz) {
			len = MIN(crp->crp_payload_length - i, blksz);
			if (len < blksz)
				bzero(blk, blksz);
			crypto_copydata(crp, crp->crp_payload_start + i, len,
			    blk);
			exf->decrypt(swe->sw_kschedule, blk);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    aalg);
	}

	return (0);
}
759 
760 /*
761  * Apply a cipher and a digest to perform EtA.
762  */
763 static int
764 swcr_eta(struct swcr_session *ses, struct cryptop *crp)
765 {
766 	int error;
767 
768 	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
769 		error = swcr_encdec(ses, crp);
770 		if (error == 0)
771 			error = swcr_authcompute(ses, crp);
772 	} else {
773 		error = swcr_authcompute(ses, crp);
774 		if (error == 0)
775 			error = swcr_encdec(ses, crp);
776 	}
777 	return (error);
778 }
779 
/*
 * Apply a compression/decompression algorithm
 *
 * The payload is linearized into a temporary buffer, transformed, and
 * copied back.  The resulting size is reported via crp->crp_olen; when
 * compression shrank the data, the request buffer is trimmed to match.
 */
static int
swcr_compdec(struct swcr_session *ses, struct cryptop *crp)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = ses->swcr_compdec.sw_cxf;

	/* We must handle the whole buffer of data in one time
	 * then if there is not all the data in the mbuf, we must
	 * copy in a buffer.
	 */

	/* NOTE(review): EINVAL for an allocation failure is unusual;
	 * ENOMEM would be more conventional — confirm callers' needs. */
	data = malloc(crp->crp_payload_length, M_CRYPTO_DATA,  M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
	    data);

	/* The transform allocates 'out'; a zero result signals failure. */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
		result = cxf->compress(data, crp->crp_payload_length, &out);
	else
		result = cxf->decompress(data, crp->crp_payload_length, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return (EINVAL);
	crp->crp_olen = result;

	/* Check the compressed size when doing compression */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
		if (result >= crp->crp_payload_length) {
			/* Compression was useless, we lost time */
			/* Original data is left untouched; crp_olen still
			 * reports the (larger) compressed size. */
			free(out, M_CRYPTO_DATA);
			return (0);
		}
	}

	/* Copy back the (de)compressed data. m_copyback is
	 * extending the mbuf as necessary.
	 */
	crypto_copyback(crp, crp->crp_payload_start, result, out);
	if (result < crp->crp_payload_length) {
		/* Shrink the request buffer down to the new length. */
		switch (crp->crp_buf_type) {
		case CRYPTO_BUF_MBUF:
			/* Negative count trims from the mbuf chain tail. */
			adj = result - crp->crp_payload_length;
			m_adj(crp->crp_mbuf, adj);
			break;
		case CRYPTO_BUF_UIO: {
			struct uio *uio = crp->crp_uio;
			int ind;

			adj = crp->crp_payload_length - result;
			ind = uio->uio_iovcnt - 1;

			/* Trim iovecs from the tail until 'adj' bytes
			 * have been dropped. */
			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
			}
			break;
		}
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}
858 
859 static int
860 swcr_setup_encdec(struct swcr_session *ses,
861     const struct crypto_session_params *csp)
862 {
863 	struct swcr_encdec *swe;
864 	struct enc_xform *txf;
865 	int error;
866 
867 	swe = &ses->swcr_encdec;
868 	txf = crypto_cipher(csp);
869 	MPASS(txf->ivsize == csp->csp_ivlen);
870 	if (csp->csp_cipher_key != NULL) {
871 		error = txf->setkey(&swe->sw_kschedule,
872 		    csp->csp_cipher_key, csp->csp_cipher_klen);
873 		if (error)
874 			return (error);
875 	}
876 	swe->sw_exf = txf;
877 	return (0);
878 }
879 
/*
 * Session setup for an authentication transform: allocate and key the
 * hash context(s) and, for digest-only sessions, select the request
 * handler.  Partially-initialized state on error paths is reclaimed by
 * swcr_freesession().
 */
static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	swa = &ses->swcr_auth;

	axf = crypto_auth_hash(csp);
	swa->sw_axf = axf;
	/* Tag length defaults to the full hash size. */
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);

	/* NOTE(review): no default case — unsupported algorithms are
	 * presumably rejected earlier by swcr_probesession(); confirm. */
	switch (csp->csp_auth_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		/* HMAC needs a second (outer-pad) context. */
		swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_octx == NULL)
			return (ENOBUFS);

		/* Key may instead arrive per-request; see swcr_authcompute. */
		if (csp->csp_auth_key != NULL) {
			swcr_authprepare(axf, swa, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}

		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		/* Unkeyed hashes: just initialize the context. */
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_NIST_GMAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_gmac;
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		/*
		 * Blake2b and Blake2s support an optional key but do
		 * not require one.
		 */
		if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL)
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_ccm_cbc_mac;
		break;
	}

	return (0);
}
964 
965 static int
966 swcr_setup_gcm(struct swcr_session *ses,
967     const struct crypto_session_params *csp)
968 {
969 	struct swcr_encdec *swe;
970 	struct swcr_auth *swa;
971 	struct enc_xform *txf;
972 	struct auth_hash *axf;
973 	int error;
974 
975 	if (csp->csp_ivlen != AES_GCM_IV_LEN)
976 		return (EINVAL);
977 
978 	/* First, setup the auth side. */
979 	swa = &ses->swcr_auth;
980 	switch (csp->csp_cipher_klen * 8) {
981 	case 128:
982 		axf = &auth_hash_nist_gmac_aes_128;
983 		break;
984 	case 192:
985 		axf = &auth_hash_nist_gmac_aes_192;
986 		break;
987 	case 256:
988 		axf = &auth_hash_nist_gmac_aes_256;
989 		break;
990 	default:
991 		return (EINVAL);
992 	}
993 	swa->sw_axf = axf;
994 	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
995 		return (EINVAL);
996 	if (csp->csp_auth_mlen == 0)
997 		swa->sw_mlen = axf->hashsize;
998 	else
999 		swa->sw_mlen = csp->csp_auth_mlen;
1000 	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
1001 	if (swa->sw_ictx == NULL)
1002 		return (ENOBUFS);
1003 	axf->Init(swa->sw_ictx);
1004 	if (csp->csp_cipher_key != NULL)
1005 		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
1006 		    csp->csp_cipher_klen);
1007 
1008 	/* Second, setup the cipher side. */
1009 	swe = &ses->swcr_encdec;
1010 	txf = &enc_xform_aes_nist_gcm;
1011 	if (csp->csp_cipher_key != NULL) {
1012 		error = txf->setkey(&swe->sw_kschedule,
1013 		    csp->csp_cipher_key, csp->csp_cipher_klen);
1014 		if (error)
1015 			return (error);
1016 	}
1017 	swe->sw_exf = txf;
1018 
1019 	return (0);
1020 }
1021 
1022 static int
1023 swcr_setup_ccm(struct swcr_session *ses,
1024     const struct crypto_session_params *csp)
1025 {
1026 	struct swcr_encdec *swe;
1027 	struct swcr_auth *swa;
1028 	struct enc_xform *txf;
1029 	struct auth_hash *axf;
1030 	int error;
1031 
1032 	if (csp->csp_ivlen != AES_CCM_IV_LEN)
1033 		return (EINVAL);
1034 
1035 	/* First, setup the auth side. */
1036 	swa = &ses->swcr_auth;
1037 	switch (csp->csp_cipher_klen * 8) {
1038 	case 128:
1039 		axf = &auth_hash_ccm_cbc_mac_128;
1040 		break;
1041 	case 192:
1042 		axf = &auth_hash_ccm_cbc_mac_192;
1043 		break;
1044 	case 256:
1045 		axf = &auth_hash_ccm_cbc_mac_256;
1046 		break;
1047 	default:
1048 		return (EINVAL);
1049 	}
1050 	swa->sw_axf = axf;
1051 	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
1052 		return (EINVAL);
1053 	if (csp->csp_auth_mlen == 0)
1054 		swa->sw_mlen = axf->hashsize;
1055 	else
1056 		swa->sw_mlen = csp->csp_auth_mlen;
1057 	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
1058 	if (swa->sw_ictx == NULL)
1059 		return (ENOBUFS);
1060 	axf->Init(swa->sw_ictx);
1061 	if (csp->csp_cipher_key != NULL)
1062 		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
1063 		    csp->csp_cipher_klen);
1064 
1065 	/* Second, setup the cipher side. */
1066 	swe = &ses->swcr_encdec;
1067 	txf = &enc_xform_ccm;
1068 	if (csp->csp_cipher_key != NULL) {
1069 		error = txf->setkey(&swe->sw_kschedule,
1070 		    csp->csp_cipher_key, csp->csp_cipher_klen);
1071 		if (error)
1072 			return (error);
1073 	}
1074 	swe->sw_exf = txf;
1075 
1076 	return (0);
1077 }
1078 
1079 static bool
1080 swcr_auth_supported(const struct crypto_session_params *csp)
1081 {
1082 	struct auth_hash *axf;
1083 
1084 	axf = crypto_auth_hash(csp);
1085 	if (axf == NULL)
1086 		return (false);
1087 	switch (csp->csp_auth_alg) {
1088 	case CRYPTO_MD5_HMAC:
1089 	case CRYPTO_SHA1_HMAC:
1090 	case CRYPTO_SHA2_224_HMAC:
1091 	case CRYPTO_SHA2_256_HMAC:
1092 	case CRYPTO_SHA2_384_HMAC:
1093 	case CRYPTO_SHA2_512_HMAC:
1094 	case CRYPTO_NULL_HMAC:
1095 	case CRYPTO_RIPEMD160_HMAC:
1096 		break;
1097 	case CRYPTO_AES_NIST_GMAC:
1098 		switch (csp->csp_auth_klen * 8) {
1099 		case 128:
1100 		case 192:
1101 		case 256:
1102 			break;
1103 		default:
1104 			return (false);
1105 		}
1106 		if (csp->csp_auth_key == NULL)
1107 			return (false);
1108 		if (csp->csp_ivlen != AES_GCM_IV_LEN)
1109 			return (false);
1110 		break;
1111 	case CRYPTO_POLY1305:
1112 		if (csp->csp_auth_klen != POLY1305_KEY_LEN)
1113 			return (false);
1114 		break;
1115 	case CRYPTO_AES_CCM_CBC_MAC:
1116 		switch (csp->csp_auth_klen * 8) {
1117 		case 128:
1118 		case 192:
1119 		case 256:
1120 			break;
1121 		default:
1122 			return (false);
1123 		}
1124 		if (csp->csp_auth_key == NULL)
1125 			return (false);
1126 		if (csp->csp_ivlen != AES_CCM_IV_LEN)
1127 			return (false);
1128 		break;
1129 	}
1130 	return (true);
1131 }
1132 
1133 static bool
1134 swcr_cipher_supported(const struct crypto_session_params *csp)
1135 {
1136 	struct enc_xform *txf;
1137 
1138 	txf = crypto_cipher(csp);
1139 	if (txf == NULL)
1140 		return (false);
1141 	if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
1142 	    txf->ivsize != csp->csp_ivlen)
1143 		return (false);
1144 	return (true);
1145 }
1146 
1147 static int
1148 swcr_probesession(device_t dev, const struct crypto_session_params *csp)
1149 {
1150 
1151 	if (csp->csp_flags != 0)
1152 		return (EINVAL);
1153 	switch (csp->csp_mode) {
1154 	case CSP_MODE_COMPRESS:
1155 		switch (csp->csp_cipher_alg) {
1156 		case CRYPTO_DEFLATE_COMP:
1157 			break;
1158 		default:
1159 			return (EINVAL);
1160 		}
1161 		break;
1162 	case CSP_MODE_CIPHER:
1163 		switch (csp->csp_cipher_alg) {
1164 		case CRYPTO_AES_NIST_GCM_16:
1165 		case CRYPTO_AES_CCM_16:
1166 			return (EINVAL);
1167 		default:
1168 			if (!swcr_cipher_supported(csp))
1169 				return (EINVAL);
1170 			break;
1171 		}
1172 		break;
1173 	case CSP_MODE_DIGEST:
1174 		if (!swcr_auth_supported(csp))
1175 			return (EINVAL);
1176 		break;
1177 	case CSP_MODE_AEAD:
1178 		switch (csp->csp_cipher_alg) {
1179 		case CRYPTO_AES_NIST_GCM_16:
1180 		case CRYPTO_AES_CCM_16:
1181 			break;
1182 		default:
1183 			return (EINVAL);
1184 		}
1185 		break;
1186 	case CSP_MODE_ETA:
1187 		/* AEAD algorithms cannot be used for EtA. */
1188 		switch (csp->csp_cipher_alg) {
1189 		case CRYPTO_AES_NIST_GCM_16:
1190 		case CRYPTO_AES_CCM_16:
1191 			return (EINVAL);
1192 		}
1193 		switch (csp->csp_auth_alg) {
1194 		case CRYPTO_AES_NIST_GMAC:
1195 		case CRYPTO_AES_CCM_CBC_MAC:
1196 			return (EINVAL);
1197 		}
1198 
1199 		if (!swcr_cipher_supported(csp) ||
1200 		    !swcr_auth_supported(csp))
1201 			return (EINVAL);
1202 		break;
1203 	default:
1204 		return (EINVAL);
1205 	}
1206 
1207 	return (CRYPTODEV_PROBE_SOFTWARE);
1208 }
1209 
1210 /*
1211  * Generate a new software session.
1212  */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct swcr_session *ses;
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct comp_algo *cxf;
	int error;

	ses = crypto_get_driver_session(cses);
	mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);

	error = 0;
	swe = &ses->swcr_encdec;
	swa = &ses->swcr_auth;
	/*
	 * Dispatch on the session mode.  swcr_probesession() has already
	 * rejected unsupported mode/algorithm combinations, which is why
	 * the "impossible" branches below only panic under INVARIANTS.
	 */
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			break;
#ifdef INVARIANTS
		default:
			panic("bad compression algo");
#endif
		}
		ses->swcr_compdec.sw_cxf = cxf;
		ses->swcr_process = swcr_compdec;
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_NULL_CBC:
			/* NULL cipher needs no key schedule setup. */
			ses->swcr_process = swcr_null;
			break;
#ifdef INVARIANTS
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			/* AEAD ciphers are only valid in CSP_MODE_AEAD. */
			panic("bad cipher algo");
#endif
		default:
			error = swcr_setup_encdec(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_encdec;
		}
		break;
	case CSP_MODE_DIGEST:
		error = swcr_setup_auth(ses, csp);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			error = swcr_setup_gcm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_gcm;
			break;
		case CRYPTO_AES_CCM_16:
			error = swcr_setup_ccm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_ccm;
			break;
#ifdef INVARIANTS
		default:
			panic("bad aead algo");
#endif
		}
		break;
	case CSP_MODE_ETA:
#ifdef INVARIANTS
		/* probesession rejected AEAD algorithms for EtA already. */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad eta cipher algo");
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			panic("bad eta auth algo");
		}
#endif

		/* Set up auth first; on failure nothing else to undo. */
		error = swcr_setup_auth(ses, csp);
		if (error)
			break;
		if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
			/* Effectively degrade to digest mode. */
			ses->swcr_process = swcr_authcompute;
			break;
		}

		error = swcr_setup_encdec(ses, csp);
		if (error == 0)
			ses->swcr_process = swcr_eta;
		break;
	default:
		error = EINVAL;
	}

	/* On failure, tear down any partially-initialized state. */
	if (error)
		swcr_freesession(dev, cses);
	return (error);
}
1315 
1316 static void
1317 swcr_freesession(device_t dev, crypto_session_t cses)
1318 {
1319 	struct swcr_session *ses;
1320 	struct swcr_auth *swa;
1321 	struct enc_xform *txf;
1322 	struct auth_hash *axf;
1323 
1324 	ses = crypto_get_driver_session(cses);
1325 
1326 	mtx_destroy(&ses->swcr_lock);
1327 
1328 	txf = ses->swcr_encdec.sw_exf;
1329 	if (txf != NULL) {
1330 		if (ses->swcr_encdec.sw_kschedule != NULL)
1331 			txf->zerokey(&(ses->swcr_encdec.sw_kschedule));
1332 	}
1333 
1334 	axf = ses->swcr_auth.sw_axf;
1335 	if (axf != NULL) {
1336 		swa = &ses->swcr_auth;
1337 		if (swa->sw_ictx != NULL) {
1338 			explicit_bzero(swa->sw_ictx, axf->ctxsize);
1339 			free(swa->sw_ictx, M_CRYPTO_DATA);
1340 		}
1341 		if (swa->sw_octx != NULL) {
1342 			explicit_bzero(swa->sw_octx, axf->ctxsize);
1343 			free(swa->sw_octx, M_CRYPTO_DATA);
1344 		}
1345 	}
1346 }
1347 
1348 /*
1349  * Process a software request.
1350  */
1351 static int
1352 swcr_process(device_t dev, struct cryptop *crp, int hint)
1353 {
1354 	struct swcr_session *ses;
1355 
1356 	ses = crypto_get_driver_session(crp->crp_session);
1357 	mtx_lock(&ses->swcr_lock);
1358 
1359 	crp->crp_etype = ses->swcr_process(ses, crp);
1360 
1361 	mtx_unlock(&ses->swcr_lock);
1362 	crypto_done(crp);
1363 	return (0);
1364 }
1365 
1366 static void
1367 swcr_identify(driver_t *drv, device_t parent)
1368 {
1369 	/* NB: order 10 is so we get attached after h/w devices */
1370 	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
1371 	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
1372 		panic("cryptosoft: could not attach");
1373 }
1374 
1375 static int
1376 swcr_probe(device_t dev)
1377 {
1378 	device_set_desc(dev, "software crypto");
1379 	return (BUS_PROBE_NOWILDCARD);
1380 }
1381 
1382 static int
1383 swcr_attach(device_t dev)
1384 {
1385 
1386 	swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
1387 			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
1388 	if (swcr_id < 0) {
1389 		device_printf(dev, "cannot initialize!");
1390 		return (ENXIO);
1391 	}
1392 
1393 	return (0);
1394 }
1395 
1396 static int
1397 swcr_detach(device_t dev)
1398 {
1399 	crypto_unregister_all(swcr_id);
1400 	return 0;
1401 }
1402 
1403 static device_method_t swcr_methods[] = {
1404 	DEVMETHOD(device_identify,	swcr_identify),
1405 	DEVMETHOD(device_probe,		swcr_probe),
1406 	DEVMETHOD(device_attach,	swcr_attach),
1407 	DEVMETHOD(device_detach,	swcr_detach),
1408 
1409 	DEVMETHOD(cryptodev_probesession, swcr_probesession),
1410 	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
1411 	DEVMETHOD(cryptodev_freesession,swcr_freesession),
1412 	DEVMETHOD(cryptodev_process,	swcr_process),
1413 
1414 	{0, 0},
1415 };
1416 
/* newbus driver glue for the cryptosoft pseudo-device. */
static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;
1423 
1424 /*
1425  * NB: We explicitly reference the crypto module so we
1426  * get the necessary ordering when built as a loadable
1427  * module.  This is required because we bundle the crypto
1428  * module code together with the cryptosoft driver (otherwise
1429  * normal module dependencies would handle things).
1430  */
/* Provided by the crypto module bundled with this driver (see above). */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
1436