xref: /freebsd/sys/opencrypto/cryptosoft.c (revision 780fb4a2fa9a9aee5ac48a60b790f567c0dc13e9)
1 /*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/
2 
3 /*-
4  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
5  * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
6  *
7  * This code was written by Angelos D. Keromytis in Athens, Greece, in
8  * February 2000. Network Security Technologies Inc. (NSTI) kindly
9  * supported the development of this code.
10  *
11  * Copyright (c) 2000, 2001 Angelos D. Keromytis
12  * Copyright (c) 2014 The FreeBSD Foundation
13  * All rights reserved.
14  *
15  * Portions of this software were developed by John-Mark Gurney
16  * under sponsorship of the FreeBSD Foundation and
17  * Rubicon Communications, LLC (Netgate).
18  *
19  * Permission to use, copy, and modify this software with or without fee
20  * is hereby granted, provided that this entire notice is included in
21  * all source code copies of any software which is or includes a copy or
22  * modification of this software.
23  *
24  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
25  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
26  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
27  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
28  * PURPOSE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/module.h>
39 #include <sys/sysctl.h>
40 #include <sys/errno.h>
41 #include <sys/random.h>
42 #include <sys/kernel.h>
43 #include <sys/uio.h>
44 #include <sys/lock.h>
45 #include <sys/rwlock.h>
46 #include <sys/endian.h>
47 #include <sys/limits.h>
48 
49 #include <crypto/blowfish/blowfish.h>
50 #include <crypto/sha1.h>
51 #include <opencrypto/rmd160.h>
52 #include <opencrypto/cast.h>
53 #include <opencrypto/skipjack.h>
54 #include <sys/md5.h>
55 
56 #include <opencrypto/cryptodev.h>
57 #include <opencrypto/cryptosoft.h>
58 #include <opencrypto/xform.h>
59 
60 #include <sys/kobj.h>
61 #include <sys/bus.h>
62 #include "cryptodev_if.h"
63 
static	int32_t swcr_id;		/* Driver id assigned at attach time. */
/*
 * Session table.  Index 0 is intentionally left unused; each entry is a
 * singly-linked chain of swcr_data records, one per crypto descriptor
 * in the session.
 */
static	struct swcr_data **swcr_sessions = NULL;
static	u_int32_t swcr_sesnum;		/* Current capacity of swcr_sessions[]. */
/* Protects swcr_sessions pointer, not data. */
static	struct rwlock swcr_sessions_lock;

/*
 * Pad-fill sources used by swcr_authprepare() to complete an HMAC block
 * after the (XOR-masked) key.  NOTE(review): presumably pre-filled with
 * HMAC_IPAD_VAL/HMAC_OPAD_VAL during crypto framework init -- the
 * initialization site is not in this file; confirm before relying on it.
 */
u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];

/* Forward declarations for the per-descriptor processing helpers. */
static	int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_authenc(struct cryptop *crp);
static	int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_freesession(device_t dev, u_int64_t tid);
static	int swcr_freesession_locked(device_t dev, u_int64_t tid);
79 
80 /*
81  * Apply a symmetric encryption/decryption algorithm.
82  */
/*
 * Apply a symmetric encryption/decryption algorithm.
 *
 * crd describes the region (crd_skip/crd_len) and direction; sw holds the
 * key schedule and transform; buf is an mbuf chain, uio or flat buffer
 * depending on 'flags'.  Data length must be a multiple of the cipher
 * block size.  For transforms without a reinit method this routine
 * implements CBC chaining in software (XOR with the previous ciphertext
 * block); transforms with reinit (e.g. counter modes) do all IV handling
 * themselves.  Returns 0 or an errno value.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, j, k, blks, ind, count, ivlen;
	struct uio *uio, uiolcl;
	struct iovec iovlcl[4];
	struct iovec *iov;
	int iovcnt, iovalloc;
	int error;

	error = 0;

	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* ICM has no way to recover a counter from the buffer alone. */
	if (crd->crd_alg == CRYPTO_AES_ICM &&
	    (crd->crd_flags & CRD_F_IV_EXPLICIT) == 0)
		return (EINVAL);

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			arc4rand(iv, ivlen, 0);

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv);
		}
	}

	/* Replace the session key schedule with a per-request key. */
	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		int error;

		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));

		error = exf->setkey(&sw->sw_kschedule,
				crd->crd_key, crd->crd_klen / 8);
		if (error)
			return (error);
	}

	/*
	 * Normalize the three supported buffer types to a struct uio so
	 * the loop below only has to deal with iovec arrays.  An mbuf
	 * chain may need a heap-allocated iovec array (iovalloc != 0).
	 */
	iov = iovlcl;
	iovcnt = nitems(iovlcl);
	iovalloc = 0;
	uio = &uiolcl;
	if ((flags & CRYPTO_F_IMBUF) != 0) {
		error = crypto_mbuftoiov((struct mbuf *)buf, &iov, &iovcnt,
		    &iovalloc);
		if (error)
			return (error);
		uio->uio_iov = iov;
		uio->uio_iovcnt = iovcnt;
	} else if ((flags & CRYPTO_F_IOV) != 0)
		uio = (struct uio *)buf;
	else {
		iov[0].iov_base = buf;
		iov[0].iov_len = crd->crd_skip + crd->crd_len;
		uio->uio_iov = iov;
		uio->uio_iovcnt = 1;
	}

	ivp = iv;

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(sw->sw_kschedule, iv);
	}

	/* Locate the iovec (ind) and offset within it (k) for crd_skip. */
	count = crd->crd_skip;
	ind = cuio_getptr(uio, count, &k);
	if (ind == -1) {
		error = EINVAL;
		goto out;
	}

	i = crd->crd_len;	/* bytes remaining to process */

	while (i > 0) {
		/*
		 * If there's insufficient data at the end of
		 * an iovec, we have to do some copying.
		 */
		if (uio->uio_iov[ind].iov_len < k + blks &&
		    uio->uio_iov[ind].iov_len != k) {
			/* Gather one block that straddles iovecs into blk[]. */
			cuio_copydata(uio, count, blks, blk);

			/* Actual encryption/decryption */
			if (exf->reinit) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(sw->sw_kschedule,
					    blk);
				} else {
					exf->decrypt(sw->sw_kschedule,
					    blk);
				}
			} else if (crd->crd_flags & CRD_F_ENCRYPT) {
				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, blk);

				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				bcopy(blk, iv, blks);
				ivp = iv;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(blk, nivp, blks);

				exf->decrypt(sw->sw_kschedule, blk);

				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				ivp = nivp;
			}

			/* Copy back decrypted block */
			cuio_copyback(uio, count, blks, blk);

			count += blks;

			/* Advance pointer */
			ind = cuio_getptr(uio, count, &k);
			if (ind == -1) {
				error = EINVAL;
				goto out;
			}

			i -= blks;

			/* Could be done... */
			if (i == 0)
				break;
		}

		/*
		 * Fast path: process blocks in place while the current
		 * iovec holds at least one whole block at offset k.
		 */
		while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) {
			uint8_t *idat;
			size_t nb, rem;

			nb = blks;
			rem = MIN((size_t)i,
			    uio->uio_iov[ind].iov_len - (size_t)k);
			idat = (uint8_t *)uio->uio_iov[ind].iov_base + k;

			if (exf->reinit) {
				/*
				 * Multi-block entry points, when present,
				 * consume rounddown(rem, blks) bytes at once.
				 */
				if ((crd->crd_flags & CRD_F_ENCRYPT) != 0 &&
				    exf->encrypt_multi == NULL)
					exf->encrypt(sw->sw_kschedule,
					    idat);
				else if ((crd->crd_flags & CRD_F_ENCRYPT) != 0) {
					nb = rounddown(rem, blks);
					exf->encrypt_multi(sw->sw_kschedule,
					    idat, nb);
				} else if (exf->decrypt_multi == NULL)
					exf->decrypt(sw->sw_kschedule,
					    idat);
				else {
					nb = rounddown(rem, blks);
					exf->decrypt_multi(sw->sw_kschedule,
					    idat, nb);
				}
			} else if (crd->crd_flags & CRD_F_ENCRYPT) {
				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, idat);
				ivp = idat;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block to be used
				 * in next block's processing.
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(idat, nivp, blks);

				exf->decrypt(sw->sw_kschedule, idat);

				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				ivp = nivp;
			}

			count += nb;
			k += nb;
			i -= nb;
		}

		/*
		 * Advance to the next iov if the end of the current iov
		 * is aligned with the end of a cipher block.
		 * Note that the code is equivalent to calling:
		 *      ind = cuio_getptr(uio, count, &k);
		 */
		if (i > 0 && k == uio->uio_iov[ind].iov_len) {
			k = 0;
			ind++;
			if (ind >= uio->uio_iovcnt) {
				error = EINVAL;
				goto out;
			}
		}
	}

out:
	if (iovalloc)
		free(iov, M_CRYPTO_DATA);

	return (error);
}
328 
/*
 * Prepare the per-session authentication state for 'key' (klen in bits).
 *
 * HMAC algorithms: precompute the inner (sw_ictx) and outer (sw_octx)
 * contexts by hashing the key XORed with the ipad/opad values plus the
 * pad-fill buffers; the key buffer is restored (XORed back) on return.
 * KPDK algorithms: hash the key into sw_ictx and stash a copy of the key
 * in sw_octx so swcr_authcompute() can append it after the data.
 * BLAKE2: the transform keeps its own key via Setkey.
 */
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
    int klen)
{
	int k;

	klen /= 8;	/* bits -> bytes */

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		/* Inner context: H(key ^ ipad ...) */
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);

		/* Flip the mask from ipad to opad in one pass. */
		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		/* Outer context: H(key ^ opad ...) */
		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);

		/* Restore the caller's key bytes. */
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/*
		 * We need a buffer that can hold an md5 and a sha1 result
		 * just to throw it away.
		 * What we do here is the initial part of:
		 *   ALGO( key, keyfill, .. )
		 * adding the key to sw_ictx and abusing Final() to get the
		 * "keyfill" padding.
		 * In addition we abuse the sw_octx to save the key to have
		 * it to be able to append it at the end in swcr_authcompute().
		 */
		u_char buf[SHA1_RESULTLEN];

		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		axf->Setkey(sw->sw_ictx, key, klen);
		axf->Init(sw->sw_ictx);
		break;
	default:
		printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
		    "doesn't use keys.\n", __func__, axf->type);
	}
}
395 
396 /*
397  * Compute keyed-hash authenticator.
398  */
/*
 * Compute keyed-hash authenticator.
 *
 * Hashes the region crd_skip/crd_len of 'buf' using the session's
 * precomputed inner context, applies the algorithm-specific finalization
 * (outer HMAC pass, trailing KPDK key, or plain Final), and writes the
 * result (truncated to sw_mlen if set) at crd_inject.
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == 0)
		return EINVAL;

	axf = sw->sw_axf;

	/* Per-request key overrides the session key. */
	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	/* Work on a copy so the precomputed context stays reusable. */
	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	/*
	 * Stream the payload into the hash; the cast adapts axf->Update's
	 * context-typed signature to crypto_apply's generic callback.
	 */
	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		/* Plain (unkeyed) digest. */
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		/* HMAC outer pass: H(opad-context || inner digest). */
		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		/* If we have no key saved, return error. */
		if (sw->sw_octx == NULL)
			return EINVAL;

		/*
		 * Add the trailing copy of the key (see comment in
		 * swcr_authprepare()) after the data:
		 *   ALGO( .., key, algofill )
		 * and let Final() do the proper, natural "algofill"
		 * padding.
		 */
		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}
477 
478 CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
479 CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */
480 
481 /*
482  * Apply a combined encryption-authentication transformation
483  */
/*
 * Apply a combined encryption-authentication transformation
 * (AES-GCM / GMAC).
 *
 * Expects the request to carry both an encryption descriptor (crde,
 * GCM/GMAC cipher) and an authentication descriptor (crda, GMAC hash);
 * fails with EINVAL otherwise.  Encrypt side: encrypt-then-MAC, tag
 * written at crda->crd_inject.  Decrypt side: the MAC is computed over
 * the ciphertext first and compared (timing-safely) against the stored
 * tag; the payload is only decrypted when the tag matches, otherwise
 * EBADMSG is returned.
 */
static int
swcr_authenc(struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe = NULL;
	struct auth_hash *axf = NULL;
	struct enc_xform *exf = NULL;
	caddr_t buf = (caddr_t)crp->crp_buf;
	uint32_t *blkp;
	int aadlen, blksz, i, ivlen, len, iskip, oskip, r;

	ivlen = blksz = iskip = oskip = 0;

	/*
	 * Pair each descriptor with its session state; sort them into
	 * the cipher (crde/swe) and the MAC (crda/swa) roles.
	 */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		     sw && sw->sw_alg != crd->crd_alg;
		     sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_NIST_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = 12;	/* 96-bit GCM nonce */
			break;
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == 0)
				return (EINVAL);
			bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
			blksz = axf->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	/* Both halves of the AEAD must be present. */
	if (crde == NULL || crda == NULL)
		return (EINVAL);

	/* GCM cannot derive its counter from the buffer alone. */
	if (crde->crd_alg == CRYPTO_AES_NIST_GCM_16 &&
	    (crde->crd_flags & CRD_F_IV_EXPLICIT) == 0)
		return (EINVAL);

	if (crde->crd_klen != crda->crd_klen)
		return (EINVAL);

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else
			arc4rand(iv, ivlen, 0);

		/* Do we need to write the IV */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(crp->crp_flags, buf, crde->crd_inject,
			    ivlen, iv);

	} else {	/* Decryption */
			/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(crp->crp_flags, buf, crde->crd_inject,
			    ivlen, iv);
		}
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	aadlen = crda->crd_len;

	/*
	 * Feed the AAD to the MAC one block at a time, zero-padding the
	 * final partial block (iskip/oskip are 0 here).
	 */
	for (i = iskip; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz - oskip);
		crypto_copydata(crp->crp_flags, buf, crda->crd_skip + i, len,
		    blk + oskip);
		bzero(blk + len + oskip, blksz - len - oskip);
		axf->Update(&ctx, blk, blksz);
		oskip = 0; /* reset initial output offset */
	}

	if (exf->reinit)
		exf->reinit(swe->sw_kschedule, iv);

	/*
	 * Do encryption/decryption with MAC.  On the decrypt side only
	 * the MAC is computed here; the actual decryption is deferred
	 * until the tag has been verified below.
	 */
	for (i = 0; i < crde->crd_len; i += len) {
		if (exf->encrypt_multi != NULL) {
			/* Multi-block path: as many whole blocks as fit blkbuf. */
			len = rounddown(crde->crd_len - i, blksz);
			if (len == 0)
				len = blksz;
			else
				len = MIN(len, sizeof(blkbuf));
		} else
			len = blksz;
		len = MIN(crde->crd_len - i, len);
		if (len < blksz)
			bzero(blk, blksz);	/* zero-pad the tail block */
		crypto_copydata(crp->crp_flags, buf, crde->crd_skip + i, len,
		    blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			if (exf->encrypt_multi != NULL)
				exf->encrypt_multi(swe->sw_kschedule, blk,
				    len);
			else
				exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
			crypto_copyback(crp->crp_flags, buf,
			    crde->crd_skip + i, len, blk);
		} else {
			axf->Update(&ctx, blk, len);
		}
	}

	/* Do any required special finalization */
	switch (crda->crd_alg) {
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			/* length block: [0:32) zero, AAD bits, zero, text bits */
			bzero(blk, blksz);
			blkp = (uint32_t *)blk + 1;
			*blkp = htobe32(aadlen * 8);
			blkp = (uint32_t *)blk + 3;
			*blkp = htobe32(crde->crd_len * 8);
			axf->Update(&ctx, blk, blksz);
			break;
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
		crypto_copydata(crp->crp_flags, buf, crda->crd_inject,
		    axf->hashsize, uaalg);

		/* Constant-time compare to avoid leaking tag bytes. */
		r = timingsafe_bcmp(aalg, uaalg, axf->hashsize);
		if (r == 0) {
			/* tag matches, decrypt data */
			for (i = 0; i < crde->crd_len; i += blksz) {
				len = MIN(crde->crd_len - i, blksz);
				if (len < blksz)
					bzero(blk, blksz);
				crypto_copydata(crp->crp_flags, buf,
				    crde->crd_skip + i, len, blk);
				exf->decrypt(swe->sw_kschedule, blk);
				crypto_copyback(crp->crp_flags, buf,
				    crde->crd_skip + i, len, blk);
			}
		} else
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp->crp_flags, buf, crda->crd_inject,
		    axf->hashsize, aalg);
	}

	return (0);
}
662 
663 /*
664  * Apply a compression/decompression algorithm
665  */
666 static int
667 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
668     caddr_t buf, int flags)
669 {
670 	u_int8_t *data, *out;
671 	struct comp_algo *cxf;
672 	int adj;
673 	u_int32_t result;
674 
675 	cxf = sw->sw_cxf;
676 
677 	/* We must handle the whole buffer of data in one time
678 	 * then if there is not all the data in the mbuf, we must
679 	 * copy in a buffer.
680 	 */
681 
682 	data = malloc(crd->crd_len, M_CRYPTO_DATA,  M_NOWAIT);
683 	if (data == NULL)
684 		return (EINVAL);
685 	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);
686 
687 	if (crd->crd_flags & CRD_F_COMP)
688 		result = cxf->compress(data, crd->crd_len, &out);
689 	else
690 		result = cxf->decompress(data, crd->crd_len, &out);
691 
692 	free(data, M_CRYPTO_DATA);
693 	if (result == 0)
694 		return EINVAL;
695 
696 	/* Copy back the (de)compressed data. m_copyback is
697 	 * extending the mbuf as necessary.
698 	 */
699 	sw->sw_size = result;
700 	/* Check the compressed size when doing compression */
701 	if (crd->crd_flags & CRD_F_COMP) {
702 		if (result >= crd->crd_len) {
703 			/* Compression was useless, we lost time */
704 			free(out, M_CRYPTO_DATA);
705 			return 0;
706 		}
707 	}
708 
709 	crypto_copyback(flags, buf, crd->crd_skip, result, out);
710 	if (result < crd->crd_len) {
711 		adj = result - crd->crd_len;
712 		if (flags & CRYPTO_F_IMBUF) {
713 			adj = result - crd->crd_len;
714 			m_adj((struct mbuf *)buf, adj);
715 		} else if (flags & CRYPTO_F_IOV) {
716 			struct uio *uio = (struct uio *)buf;
717 			int ind;
718 
719 			adj = crd->crd_len - result;
720 			ind = uio->uio_iovcnt - 1;
721 
722 			while (adj > 0 && ind >= 0) {
723 				if (adj < uio->uio_iov[ind].iov_len) {
724 					uio->uio_iov[ind].iov_len -= adj;
725 					break;
726 				}
727 
728 				adj -= uio->uio_iov[ind].iov_len;
729 				uio->uio_iov[ind].iov_len = 0;
730 				ind--;
731 				uio->uio_iovcnt--;
732 			}
733 		}
734 	}
735 	free(out, M_CRYPTO_DATA);
736 	return 0;
737 }
738 
739 /*
740  * Generate a new software session.
741  */
/*
 * Generate a new software session.
 *
 * Grows the global session table if needed (under the write lock, which
 * is then downgraded to shared for the per-descriptor setup), picks the
 * first free slot, and builds one swcr_data record per cryptoini in the
 * chain.  On any failure the partially-built session is torn down via
 * swcr_freesession_locked().  The slot index is returned through *sid.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int len;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	/* Find a free slot, or detect that the table must grow. */
	rw_wlock(&swcr_sessions_lock);
	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		/* Allocate the initial table, or double the existing one. */
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			rw_wunlock(&swcr_sessions_lock);
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions != NULL) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	/* Table is stable now; shared lock suffices for slot setup. */
	rw_downgrade(&swcr_sessions_lock);
	swd = &swcr_sessions[i];
	*sid = i;

	/* Build one swcr_data per cryptoini, linked through sw_next. */
	while (cri) {
		*swd = malloc(sizeof(struct swcr_data),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (*swd == NULL) {
			swcr_freesession_locked(dev, i);
			rw_runlock(&swcr_sessions_lock);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_AES_XTS:
			txf = &enc_xform_aes_xts;
			goto enccommon;
		case CRYPTO_AES_ICM:
			txf = &enc_xform_aes_icm;
			goto enccommon;
		case CRYPTO_AES_NIST_GCM_16:
			txf = &enc_xform_aes_nist_gcm;
			goto enccommon;
		case CRYPTO_AES_NIST_GMAC:
			/* GMAC cipher side keeps no key schedule here. */
			txf = &enc_xform_aes_nist_gmac;
			(*swd)->sw_exf = txf;
			break;
		case CRYPTO_CAMELLIA_CBC:
			txf = &enc_xform_camellia;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		case CRYPTO_CHACHA20:
			txf = &enc_xform_chacha20;
			goto enccommon;
		enccommon:
			/* Ciphers: schedule the key now if one was given. */
			if (cri->cri_key != NULL) {
				error = txf->setkey(&((*swd)->sw_kschedule),
				    cri->cri_key, cri->cri_klen / 8);
				if (error) {
					swcr_freesession_locked(dev, i);
					rw_runlock(&swcr_sessions_lock);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_224_HMAC:
			axf = &auth_hash_hmac_sha2_224;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			/* HMAC: inner and outer contexts, keyed below. */
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			/* KPDK: one hash context plus a saved copy of the key. */
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(cri->cri_klen / 8,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#ifdef notdef
		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;
#endif

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
			goto auth3common;
		case CRYPTO_SHA2_224:
			axf = &auth_hash_sha2_224;
			goto auth3common;
		case CRYPTO_SHA2_256:
			axf = &auth_hash_sha2_256;
			goto auth3common;
		case CRYPTO_SHA2_384:
			axf = &auth_hash_sha2_384;
			goto auth3common;
		case CRYPTO_SHA2_512:
			axf = &auth_hash_sha2_512;

		auth3common:
			/* Plain (unkeyed) digest: single context, Init now. */
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_AES_128_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_128;
			goto auth4common;

		case CRYPTO_AES_192_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_192;
			goto auth4common;

		case CRYPTO_AES_256_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_256;
		auth4common:
			/* GMAC: key must be a valid AES key length. */
			len = cri->cri_klen / 8;
			if (len != 16 && len != 24 && len != 32) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return EINVAL;
			}

			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}
			axf->Init((*swd)->sw_ictx);
			axf->Setkey((*swd)->sw_ictx, cri->cri_key, len);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_BLAKE2B:
			axf = &auth_hash_blake2b;
			goto auth5common;
		case CRYPTO_BLAKE2S:
			axf = &auth_hash_blake2s;
		auth5common:
			/* BLAKE2: keyed hash, Setkey before Init. */
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}
			axf->Setkey((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession_locked(dev, i);
			rw_runlock(&swcr_sessions_lock);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	rw_runlock(&swcr_sessions_lock);
	return 0;
}
1042 
1043 static int
1044 swcr_freesession(device_t dev, u_int64_t tid)
1045 {
1046 	int error;
1047 
1048 	rw_rlock(&swcr_sessions_lock);
1049 	error = swcr_freesession_locked(dev, tid);
1050 	rw_runlock(&swcr_sessions_lock);
1051 	return error;
1052 }
1053 
1054 /*
1055  * Free a session.
1056  */
1057 static int
1058 swcr_freesession_locked(device_t dev, u_int64_t tid)
1059 {
1060 	struct swcr_data *swd;
1061 	struct enc_xform *txf;
1062 	struct auth_hash *axf;
1063 	u_int32_t sid = CRYPTO_SESID2LID(tid);
1064 
1065 	if (sid > swcr_sesnum || swcr_sessions == NULL ||
1066 	    swcr_sessions[sid] == NULL)
1067 		return EINVAL;
1068 
1069 	/* Silently accept and return */
1070 	if (sid == 0)
1071 		return 0;
1072 
1073 	while ((swd = swcr_sessions[sid]) != NULL) {
1074 		swcr_sessions[sid] = swd->sw_next;
1075 
1076 		switch (swd->sw_alg) {
1077 		case CRYPTO_DES_CBC:
1078 		case CRYPTO_3DES_CBC:
1079 		case CRYPTO_BLF_CBC:
1080 		case CRYPTO_CAST_CBC:
1081 		case CRYPTO_SKIPJACK_CBC:
1082 		case CRYPTO_RIJNDAEL128_CBC:
1083 		case CRYPTO_AES_XTS:
1084 		case CRYPTO_AES_ICM:
1085 		case CRYPTO_AES_NIST_GCM_16:
1086 		case CRYPTO_AES_NIST_GMAC:
1087 		case CRYPTO_CAMELLIA_CBC:
1088 		case CRYPTO_NULL_CBC:
1089 		case CRYPTO_CHACHA20:
1090 			txf = swd->sw_exf;
1091 
1092 			if (swd->sw_kschedule)
1093 				txf->zerokey(&(swd->sw_kschedule));
1094 			break;
1095 
1096 		case CRYPTO_MD5_HMAC:
1097 		case CRYPTO_SHA1_HMAC:
1098 		case CRYPTO_SHA2_224_HMAC:
1099 		case CRYPTO_SHA2_256_HMAC:
1100 		case CRYPTO_SHA2_384_HMAC:
1101 		case CRYPTO_SHA2_512_HMAC:
1102 		case CRYPTO_RIPEMD160_HMAC:
1103 		case CRYPTO_NULL_HMAC:
1104 			axf = swd->sw_axf;
1105 
1106 			if (swd->sw_ictx) {
1107 				bzero(swd->sw_ictx, axf->ctxsize);
1108 				free(swd->sw_ictx, M_CRYPTO_DATA);
1109 			}
1110 			if (swd->sw_octx) {
1111 				bzero(swd->sw_octx, axf->ctxsize);
1112 				free(swd->sw_octx, M_CRYPTO_DATA);
1113 			}
1114 			break;
1115 
1116 		case CRYPTO_MD5_KPDK:
1117 		case CRYPTO_SHA1_KPDK:
1118 			axf = swd->sw_axf;
1119 
1120 			if (swd->sw_ictx) {
1121 				bzero(swd->sw_ictx, axf->ctxsize);
1122 				free(swd->sw_ictx, M_CRYPTO_DATA);
1123 			}
1124 			if (swd->sw_octx) {
1125 				bzero(swd->sw_octx, swd->sw_klen);
1126 				free(swd->sw_octx, M_CRYPTO_DATA);
1127 			}
1128 			break;
1129 
1130 		case CRYPTO_BLAKE2B:
1131 		case CRYPTO_BLAKE2S:
1132 		case CRYPTO_MD5:
1133 		case CRYPTO_SHA1:
1134 		case CRYPTO_SHA2_224:
1135 		case CRYPTO_SHA2_256:
1136 		case CRYPTO_SHA2_384:
1137 		case CRYPTO_SHA2_512:
1138 			axf = swd->sw_axf;
1139 
1140 			if (swd->sw_ictx) {
1141 				explicit_bzero(swd->sw_ictx, axf->ctxsize);
1142 				free(swd->sw_ictx, M_CRYPTO_DATA);
1143 			}
1144 			break;
1145 
1146 		case CRYPTO_DEFLATE_COMP:
1147 			/* Nothing to do */
1148 			break;
1149 		}
1150 
1151 		free(swd, M_CRYPTO_DATA);
1152 	}
1153 	return 0;
1154 }
1155 
1156 /*
1157  * Process a software request.
1158  */
1159 static int
1160 swcr_process(device_t dev, struct cryptop *crp, int hint)
1161 {
1162 	struct cryptodesc *crd;
1163 	struct swcr_data *sw;
1164 	u_int32_t lid;
1165 
1166 	/* Sanity check */
1167 	if (crp == NULL)
1168 		return EINVAL;
1169 
1170 	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
1171 		crp->crp_etype = EINVAL;
1172 		goto done;
1173 	}
1174 
1175 	lid = CRYPTO_SESID2LID(crp->crp_sid);
1176 	rw_rlock(&swcr_sessions_lock);
1177 	if (swcr_sessions == NULL || lid >= swcr_sesnum || lid == 0 ||
1178 	    swcr_sessions[lid] == NULL) {
1179 		rw_runlock(&swcr_sessions_lock);
1180 		crp->crp_etype = ENOENT;
1181 		goto done;
1182 	}
1183 	rw_runlock(&swcr_sessions_lock);
1184 
1185 	/* Go through crypto descriptors, processing as we go */
1186 	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1187 		/*
1188 		 * Find the crypto context.
1189 		 *
1190 		 * XXX Note that the logic here prevents us from having
1191 		 * XXX the same algorithm multiple times in a session
1192 		 * XXX (or rather, we can but it won't give us the right
1193 		 * XXX results). To do that, we'd need some way of differentiating
1194 		 * XXX between the various instances of an algorithm (so we can
1195 		 * XXX locate the correct crypto context).
1196 		 */
1197 		rw_rlock(&swcr_sessions_lock);
1198 		if (swcr_sessions == NULL) {
1199 			rw_runlock(&swcr_sessions_lock);
1200 			crp->crp_etype = ENOENT;
1201 			goto done;
1202 		}
1203 		for (sw = swcr_sessions[lid];
1204 		    sw && sw->sw_alg != crd->crd_alg;
1205 		    sw = sw->sw_next)
1206 			;
1207 		rw_runlock(&swcr_sessions_lock);
1208 
1209 		/* No such context ? */
1210 		if (sw == NULL) {
1211 			crp->crp_etype = EINVAL;
1212 			goto done;
1213 		}
1214 		switch (sw->sw_alg) {
1215 		case CRYPTO_DES_CBC:
1216 		case CRYPTO_3DES_CBC:
1217 		case CRYPTO_BLF_CBC:
1218 		case CRYPTO_CAST_CBC:
1219 		case CRYPTO_SKIPJACK_CBC:
1220 		case CRYPTO_RIJNDAEL128_CBC:
1221 		case CRYPTO_AES_XTS:
1222 		case CRYPTO_AES_ICM:
1223 		case CRYPTO_CAMELLIA_CBC:
1224 		case CRYPTO_CHACHA20:
1225 			if ((crp->crp_etype = swcr_encdec(crd, sw,
1226 			    crp->crp_buf, crp->crp_flags)) != 0)
1227 				goto done;
1228 			break;
1229 		case CRYPTO_NULL_CBC:
1230 			crp->crp_etype = 0;
1231 			break;
1232 		case CRYPTO_MD5_HMAC:
1233 		case CRYPTO_SHA1_HMAC:
1234 		case CRYPTO_SHA2_224_HMAC:
1235 		case CRYPTO_SHA2_256_HMAC:
1236 		case CRYPTO_SHA2_384_HMAC:
1237 		case CRYPTO_SHA2_512_HMAC:
1238 		case CRYPTO_RIPEMD160_HMAC:
1239 		case CRYPTO_NULL_HMAC:
1240 		case CRYPTO_MD5_KPDK:
1241 		case CRYPTO_SHA1_KPDK:
1242 		case CRYPTO_MD5:
1243 		case CRYPTO_SHA1:
1244 		case CRYPTO_SHA2_224:
1245 		case CRYPTO_SHA2_256:
1246 		case CRYPTO_SHA2_384:
1247 		case CRYPTO_SHA2_512:
1248 		case CRYPTO_BLAKE2B:
1249 		case CRYPTO_BLAKE2S:
1250 			if ((crp->crp_etype = swcr_authcompute(crd, sw,
1251 			    crp->crp_buf, crp->crp_flags)) != 0)
1252 				goto done;
1253 			break;
1254 
1255 		case CRYPTO_AES_NIST_GCM_16:
1256 		case CRYPTO_AES_NIST_GMAC:
1257 		case CRYPTO_AES_128_NIST_GMAC:
1258 		case CRYPTO_AES_192_NIST_GMAC:
1259 		case CRYPTO_AES_256_NIST_GMAC:
1260 			crp->crp_etype = swcr_authenc(crp);
1261 			goto done;
1262 
1263 		case CRYPTO_DEFLATE_COMP:
1264 			if ((crp->crp_etype = swcr_compdec(crd, sw,
1265 			    crp->crp_buf, crp->crp_flags)) != 0)
1266 				goto done;
1267 			else
1268 				crp->crp_olen = (int)sw->sw_size;
1269 			break;
1270 
1271 		default:
1272 			/* Unknown/unsupported algorithm */
1273 			crp->crp_etype = EINVAL;
1274 			goto done;
1275 		}
1276 	}
1277 
1278 done:
1279 	crypto_done(crp);
1280 	return 0;
1281 }
1282 
1283 static void
1284 swcr_identify(driver_t *drv, device_t parent)
1285 {
1286 	/* NB: order 10 is so we get attached after h/w devices */
1287 	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
1288 	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
1289 		panic("cryptosoft: could not attach");
1290 }
1291 
1292 static int
1293 swcr_probe(device_t dev)
1294 {
1295 	device_set_desc(dev, "software crypto");
1296 	return (BUS_PROBE_NOWILDCARD);
1297 }
1298 
1299 static int
1300 swcr_attach(device_t dev)
1301 {
1302 	rw_init(&swcr_sessions_lock, "swcr_sessions_lock");
1303 	memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
1304 	memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);
1305 
1306 	swcr_id = crypto_get_driverid(dev,
1307 			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
1308 	if (swcr_id < 0) {
1309 		device_printf(dev, "cannot initialize!");
1310 		return ENOMEM;
1311 	}
1312 #define	REGISTER(alg) \
1313 	crypto_register(swcr_id, alg, 0,0)
1314 	REGISTER(CRYPTO_DES_CBC);
1315 	REGISTER(CRYPTO_3DES_CBC);
1316 	REGISTER(CRYPTO_BLF_CBC);
1317 	REGISTER(CRYPTO_CAST_CBC);
1318 	REGISTER(CRYPTO_SKIPJACK_CBC);
1319 	REGISTER(CRYPTO_NULL_CBC);
1320 	REGISTER(CRYPTO_MD5_HMAC);
1321 	REGISTER(CRYPTO_SHA1_HMAC);
1322 	REGISTER(CRYPTO_SHA2_224_HMAC);
1323 	REGISTER(CRYPTO_SHA2_256_HMAC);
1324 	REGISTER(CRYPTO_SHA2_384_HMAC);
1325 	REGISTER(CRYPTO_SHA2_512_HMAC);
1326 	REGISTER(CRYPTO_RIPEMD160_HMAC);
1327 	REGISTER(CRYPTO_NULL_HMAC);
1328 	REGISTER(CRYPTO_MD5_KPDK);
1329 	REGISTER(CRYPTO_SHA1_KPDK);
1330 	REGISTER(CRYPTO_MD5);
1331 	REGISTER(CRYPTO_SHA1);
1332 	REGISTER(CRYPTO_SHA2_224);
1333 	REGISTER(CRYPTO_SHA2_256);
1334 	REGISTER(CRYPTO_SHA2_384);
1335 	REGISTER(CRYPTO_SHA2_512);
1336 	REGISTER(CRYPTO_RIJNDAEL128_CBC);
1337 	REGISTER(CRYPTO_AES_XTS);
1338 	REGISTER(CRYPTO_AES_ICM);
1339 	REGISTER(CRYPTO_AES_NIST_GCM_16);
1340 	REGISTER(CRYPTO_AES_NIST_GMAC);
1341 	REGISTER(CRYPTO_AES_128_NIST_GMAC);
1342 	REGISTER(CRYPTO_AES_192_NIST_GMAC);
1343 	REGISTER(CRYPTO_AES_256_NIST_GMAC);
1344  	REGISTER(CRYPTO_CAMELLIA_CBC);
1345 	REGISTER(CRYPTO_DEFLATE_COMP);
1346 	REGISTER(CRYPTO_BLAKE2B);
1347 	REGISTER(CRYPTO_BLAKE2S);
1348 	REGISTER(CRYPTO_CHACHA20);
1349 #undef REGISTER
1350 
1351 	return 0;
1352 }
1353 
/*
 * Detach hook: unregister all algorithms from the crypto framework,
 * then release the session table and the lock protecting it.
 *
 * Order matters: unregister first so no new requests arrive, free the
 * table under the write lock, and destroy the lock only after its last
 * use.  NOTE(review): this frees only the table itself; it assumes all
 * per-session swcr_data chains were already released via freesession —
 * confirm against the framework's detach sequence.
 */
static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	rw_wlock(&swcr_sessions_lock);
	free(swcr_sessions, M_CRYPTO_DATA);
	swcr_sessions = NULL;
	rw_wunlock(&swcr_sessions_lock);
	rw_destroy(&swcr_sessions_lock);
	return 0;
}
1365 
/*
 * newbus method table: standard device lifecycle hooks plus the
 * cryptodev interface (session management and request processing).
 */
static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession,swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};
1378 
/* Driver declaration; all state is global, so no per-instance softc. */
static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;
1385 
1386 /*
1387  * NB: We explicitly reference the crypto module so we
1388  * get the necessary ordering when built as a loadable
1389  * module.  This is required because we bundle the crypto
1390  * module code together with the cryptosoft driver (otherwise
1391  * normal module dependencies would handle things).
1392  */
1393 extern int crypto_modevent(struct module *, int, void *);
1394 /* XXX where to attach */
1395 DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0);
1396 MODULE_VERSION(cryptosoft, 1);
1397 MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
1398