/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>
#include <sys/mutex.h>

#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

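/*
 * Per-session state, one sub-structure per class of operation:
 * authentication (HMAC inner/outer contexts and truncated MAC length),
 * symmetric encryption (expanded key schedule), and compression.
 */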
struct swcr_auth {
	void		*sw_ictx;
	void		*sw_octx;
	struct auth_hash *sw_axf;
	uint16_t	sw_mlen;
};

struct swcr_encdec {
	uint8_t		*sw_kschedule;
	struct enc_xform *sw_exf;
};

struct swcr_compdec {
	struct comp_algo *sw_cxf;
};

struct swcr_session {
	struct mtx	swcr_lock;
	int	(*swcr_process)(struct swcr_session *, struct cryptop *);

	struct swcr_auth swcr_auth;
	struct swcr_encdec swcr_encdec;
	struct swcr_compdec swcr_compdec;
};

static	int32_t swcr_id;

static	void swcr_freesession(device_t dev, crypto_session_t cses);

/* Used for CRYPTO_NULL_CBC. */
static int
swcr_null(struct swcr_session *ses, struct cryptop *crp)
{

	return (0);
}

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	const struct crypto_session_params *csp;
	struct swcr_encdec *sw;
	struct enc_xform *exf;
	int i, j, k, blks, ind, count, ivlen;
	struct uio *uio, uiolcl;
	struct iovec iovlcl[4];
	struct iovec *iov;
	int iovcnt, iovalloc;
	int error;
	bool encrypting;

	error = 0;

	sw = &ses->swcr_encdec;
	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Check for non-padded data */
	if ((crp->crp_payload_length % blks) != 0)
		return EINVAL;

	if (exf == &enc_xform_aes_icm &&
	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	crypto_read_iv(crp, iv);

	if (crp->crp_cipher_key != NULL) {
		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));

		csp = crypto_get_params(crp->crp_session);
		error = exf->setkey(&sw->sw_kschedule,
		    crp->crp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}

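	/*
	 * Map the request buffer onto a uio so the cipher loop below
	 * can walk mbuf chains, uios, and contiguous buffers uniformly.
	 */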
	iov = iovlcl;
	iovcnt = nitems(iovlcl);
	iovalloc = 0;
	uio = &uiolcl;
	switch (crp->crp_buf_type) {
	case CRYPTO_BUF_MBUF:
		error = crypto_mbuftoiov(crp->crp_mbuf, &iov, &iovcnt,
		    &iovalloc);
		if (error)
			return (error);
		uio->uio_iov = iov;
		uio->uio_iovcnt = iovcnt;
		break;
	case CRYPTO_BUF_UIO:
		uio = crp->crp_uio;
		break;
	case CRYPTO_BUF_CONTIG:
		iov[0].iov_base = crp->crp_buf;
		iov[0].iov_len = crp->crp_ilen;
		uio->uio_iov = iov;
		uio->uio_iovcnt = 1;
		break;
	}

	ivp = iv;

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(sw->sw_kschedule, iv);
	}

	count = crp->crp_payload_start;
	ind = cuio_getptr(uio, count, &k);
	if (ind == -1) {
		error = EINVAL;
		goto out;
	}

	i = crp->crp_payload_length;
	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);

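	/*
	 * Process the payload one cipher block at a time.  A block that
	 * straddles an iovec boundary is bounced through the local
	 * blk[] buffer; runs of whole blocks inside a single iovec are
	 * handled in place, using the multi-block entry points when the
	 * xform provides them.
	 */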
	while (i > 0) {
		/*
		 * If there's insufficient data at the end of
		 * an iovec, we have to do some copying.
		 */
		if (uio->uio_iov[ind].iov_len < k + blks &&
		    uio->uio_iov[ind].iov_len != k) {
			cuio_copydata(uio, count, blks, blk);

			/* Actual encryption/decryption */
			if (exf->reinit) {
				if (encrypting) {
					exf->encrypt(sw->sw_kschedule,
					    blk);
				} else {
					exf->decrypt(sw->sw_kschedule,
					    blk);
				}
			} else if (encrypting) {
				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, blk);

				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				bcopy(blk, iv, blks);
				ivp = iv;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(blk, nivp, blks);

				exf->decrypt(sw->sw_kschedule, blk);

				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				ivp = nivp;
			}

			/* Copy back processed block */
			cuio_copyback(uio, count, blks, blk);

			count += blks;

			/* Advance pointer */
			ind = cuio_getptr(uio, count, &k);
			if (ind == -1) {
				error = EINVAL;
				goto out;
			}

			i -= blks;

			/* Could be done... */
			if (i == 0)
				break;
		}

		while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) {
			uint8_t *idat;
			size_t nb, rem;

			nb = blks;
			rem = MIN((size_t)i,
			    uio->uio_iov[ind].iov_len - (size_t)k);
			idat = (uint8_t *)uio->uio_iov[ind].iov_base + k;

			if (exf->reinit) {
				if (encrypting && exf->encrypt_multi == NULL)
					exf->encrypt(sw->sw_kschedule,
					    idat);
				else if (encrypting) {
					nb = rounddown(rem, blks);
					exf->encrypt_multi(sw->sw_kschedule,
					    idat, nb);
				} else if (exf->decrypt_multi == NULL)
					exf->decrypt(sw->sw_kschedule,
					    idat);
				else {
					nb = rounddown(rem, blks);
					exf->decrypt_multi(sw->sw_kschedule,
					    idat, nb);
				}
			} else if (encrypting) {
				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, idat);
				ivp = idat;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block to be used
				 * in next block's processing.
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(idat, nivp, blks);

				exf->decrypt(sw->sw_kschedule, idat);

				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				ivp = nivp;
			}

			count += nb;
			k += nb;
			i -= nb;
		}

		/*
		 * Advance to the next iov if the end of the current iov
		 * is aligned with the end of a cipher block.
		 * Note that the code is equivalent to calling:
		 *      ind = cuio_getptr(uio, count, &k);
		 */
		if (i > 0 && k == uio->uio_iov[ind].iov_len) {
			k = 0;
			ind++;
			if (ind >= uio->uio_iovcnt) {
				error = EINVAL;
				goto out;
			}
		}
	}

out:
	if (iovalloc)
		free(iov, M_CRYPTO_DATA);

	return (error);
}

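/*
 * (Re)initialize the authentication state for the supplied key: HMAC
 * algorithms get fresh inner and outer pad contexts, while keyed
 * digests have the key installed directly.
 */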
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw,
    const uint8_t *key, int klen)
{

	switch (axf->type) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		hmac_init_ipad(axf, key, klen, sw->sw_ictx);
		hmac_init_opad(axf, key, klen, sw->sw_octx);
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		axf->Setkey(sw->sw_ictx, key, klen);
		axf->Init(sw->sw_ictx);
		break;
	default:
		panic("%s: algorithm %d doesn't use keys", __func__, axf->type);
	}
}

/*
 * Compute or verify hash.
 */
static int
swcr_authcompute(struct swcr_session *ses, struct cryptop *crp)
{
	u_char aalg[HASH_MAX_LEN];
	u_char uaalg[HASH_MAX_LEN];
	const struct crypto_session_params *csp;
	struct swcr_auth *sw;
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	sw = &ses->swcr_auth;

	axf = sw->sw_axf;

	if (crp->crp_auth_key != NULL) {
		csp = crypto_get_params(crp->crp_session);
		swcr_authprepare(axf, sw, crp->crp_auth_key,
		    csp->csp_auth_klen);
	}

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
	    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
	if (err)
		return err;

	err = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length,
	    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
	if (err)
		return err;

	switch (axf->type) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_POLY1305:
		axf->Final(aalg, &ctx);
		break;
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
		if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
	}
	return (0);
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */

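/*
 * Compute or verify a GMAC: a GHASH over the payload (treated as
 * associated data) followed by a length block.
 */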
static int
swcr_gmac(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	uint32_t *blkp;
	int blksz, i, ivlen, len;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	crypto_read_iv(crp, iv);

	axf->Reinit(&ctx, iv, ivlen);
	for (i = 0; i < crp->crp_payload_length; i += blksz) {
		len = MIN(crp->crp_payload_length - i, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	/* length block */
	bzero(blk, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);
		if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
	}
	return (0);
}

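/*
 * Perform AES-GCM.  The payload and AAD are MAC'd during encryption;
 * for decryption the tag is verified over the ciphertext before any
 * plaintext is written back.
 */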
static int
swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	uint32_t *blkp;
	int blksz, i, ivlen, len, r;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crp->crp_aad_length; i += blksz) {
		len = MIN(crp->crp_aad_length - i, blksz);
		crypto_copydata(crp, crp->crp_aad_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption with MAC */
	for (i = 0; i < crp->crp_payload_length; i += len) {
		len = MIN(crp->crp_payload_length - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		} else {
			axf->Update(&ctx, blk, len);
		}
	}

	/* length block */
	bzero(blk, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_aad_length * 8);
	blkp = (uint32_t *)blk + 3;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);

		r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
		if (r != 0)
			return (EBADMSG);

		/* tag matches, decrypt data */
		for (i = 0; i < crp->crp_payload_length; i += blksz) {
			len = MIN(crp->crp_payload_length - i, blksz);
			if (len < blksz)
				bzero(blk, blksz);
			crypto_copydata(crp, crp->crp_payload_start + i, len,
			    blk);
			exf->decrypt(swe->sw_kschedule, blk);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    aalg);
	}

	return (0);
}

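/*
 * Compute or verify an AES-CCM CBC-MAC over the payload alone; the
 * data lengths must be loaded into the context before hashing.
 */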
static int
swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	int blksz, i, ivlen, len;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	crypto_read_iv(crp, iv);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = 0;

	axf->Reinit(&ctx, iv, ivlen);
	for (i = 0; i < crp->crp_payload_length; i += blksz) {
		len = MIN(crp->crp_payload_length - i, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);
		if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
	}
	return (0);
}

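/*
 * Perform AES-CCM.  Since the CBC-MAC is computed over the plaintext,
 * decryption takes two passes: one to generate the tag for
 * verification and a second to release the decrypted payload.
 */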
static int
swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	int blksz, i, ivlen, len, r;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crp->crp_aad_length; i += blksz) {
		len = MIN(crp->crp_aad_length - i, blksz);
		crypto_copydata(crp, crp->crp_aad_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crp->crp_payload_length; i += len) {
		len = MIN(crp->crp_payload_length - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			axf->Update(&ctx, blk, len);
			exf->encrypt(swe->sw_kschedule, blk);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		} else {
			/*
			 * One of the problems with CCM+CBC is that
			 * the authentication is done on the
			 * unencrypted data.  As a result, we have to
			 * decrypt the data twice: once to generate
			 * the tag and a second time after the tag is
			 * verified.
			 */
			exf->decrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
		}
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);

		r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
		if (r != 0)
			return (EBADMSG);

		/* tag matches, decrypt data */
		exf->reinit(swe->sw_kschedule, iv);
		for (i = 0; i < crp->crp_payload_length; i += blksz) {
			len = MIN(crp->crp_payload_length - i, blksz);
			if (len < blksz)
				bzero(blk, blksz);
			crypto_copydata(crp, crp->crp_payload_start + i, len,
			    blk);
			exf->decrypt(swe->sw_kschedule, blk);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    aalg);
	}

	return (0);
}

/*
 * Apply a cipher and a digest to perform EtA.
 */
static int
swcr_eta(struct swcr_session *ses, struct cryptop *crp)
{
	int error;

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		error = swcr_encdec(ses, crp);
		if (error == 0)
			error = swcr_authcompute(ses, crp);
	} else {
		error = swcr_authcompute(ses, crp);
		if (error == 0)
			error = swcr_encdec(ses, crp);
	}
	return (error);
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct swcr_session *ses, struct cryptop *crp)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = ses->swcr_compdec.sw_cxf;

	/*
	 * We must handle the whole buffer of data at once; if the
	 * data is not contiguous in the request, copy it into a
	 * temporary buffer first.
	 */

	data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
	    data);

	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
		result = cxf->compress(data, crp->crp_payload_length, &out);
	else
		result = cxf->decompress(data, crp->crp_payload_length, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return (EINVAL);
	crp->crp_olen = result;

	/* Check the compressed size when doing compression */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
		if (result >= crp->crp_payload_length) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return (0);
		}
	}

	/*
	 * Copy back the (de)compressed data.  m_copyback extends the
	 * mbuf as necessary.
	 */
	crypto_copyback(crp, crp->crp_payload_start, result, out);
	if (result < crp->crp_payload_length) {
		switch (crp->crp_buf_type) {
		case CRYPTO_BUF_MBUF:
			adj = result - crp->crp_payload_length;
			m_adj(crp->crp_mbuf, adj);
			break;
		case CRYPTO_BUF_UIO: {
			struct uio *uio = crp->crp_uio;
			int ind;

			adj = crp->crp_payload_length - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
			}
			break;
		}
	}
	free(out, M_CRYPTO_DATA);
	return (0);
}

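/*
 * Session setup: expand the key schedule up front when a key is
 * supplied with the session parameters, so per-request processing only
 * has to rekey if the request carries its own key.
 */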
static int
swcr_setup_encdec(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	struct enc_xform *txf;
	int error;

	swe = &ses->swcr_encdec;
	txf = crypto_cipher(csp);
	MPASS(txf->ivsize == csp->csp_ivlen);
	if (csp->csp_cipher_key != NULL) {
		error = txf->setkey(&swe->sw_kschedule,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;
	return (0);
}

static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	swa = &ses->swcr_auth;

	axf = crypto_auth_hash(csp);
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_octx == NULL)
			return (ENOBUFS);

		if (csp->csp_auth_key != NULL) {
			swcr_authprepare(axf, swa, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}

		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_NIST_GMAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_gmac;
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		/*
		 * Blake2b and Blake2s support an optional key but do
		 * not require one.
		 */
		if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL)
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_ccm_cbc_mac;
		break;
	}

	return (0);
}

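/*
 * GCM sessions pair a GHASH-based auth transform, selected by AES key
 * size, with the AES-GCM cipher transform.
 */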
static int
swcr_setup_gcm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct enc_xform *txf;
	struct auth_hash *axf;
	int error;

	if (csp->csp_ivlen != AES_GCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_nist_gmac_aes_128;
		break;
	case 192:
		axf = &auth_hash_nist_gmac_aes_192;
		break;
	case 256:
		axf = &auth_hash_nist_gmac_aes_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	swe = &ses->swcr_encdec;
	txf = &enc_xform_aes_nist_gcm;
	if (csp->csp_cipher_key != NULL) {
		error = txf->setkey(&swe->sw_kschedule,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;

	return (0);
}

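/*
 * CCM sessions pair a CBC-MAC auth transform, selected by AES key
 * size, with the AES-CCM cipher transform.
 */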
static int
swcr_setup_ccm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct enc_xform *txf;
	struct auth_hash *axf;
	int error;

	if (csp->csp_ivlen != AES_CCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_ccm_cbc_mac_128;
		break;
	case 192:
		axf = &auth_hash_ccm_cbc_mac_192;
		break;
	case 256:
		axf = &auth_hash_ccm_cbc_mac_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	swe = &ses->swcr_encdec;
	txf = &enc_xform_ccm;
	if (csp->csp_cipher_key != NULL) {
		error = txf->setkey(&swe->sw_kschedule,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;

	return (0);
}

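/*
 * Report whether the requested algorithms are supported, validating
 * key and IV lengths for the keyed MACs.
 */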
static bool
swcr_auth_supported(const struct crypto_session_params *csp)
{
	struct auth_hash *axf;

	axf = crypto_auth_hash(csp);
	if (axf == NULL)
		return (false);
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		break;
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_GCM_IV_LEN)
			return (false);
		break;
	case CRYPTO_POLY1305:
		if (csp->csp_auth_klen != POLY1305_KEY_LEN)
			return (false);
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_CCM_IV_LEN)
			return (false);
		break;
	}
	return (true);
}

static bool
swcr_cipher_supported(const struct crypto_session_params *csp)
{
	struct enc_xform *txf;

	txf = crypto_cipher(csp);
	if (txf == NULL)
		return (false);
	if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
	    txf->ivsize != csp->csp_ivlen)
		return (false);
	return (true);
}

static int
swcr_probesession(device_t dev, const struct crypto_session_params *csp)
{

	if (csp->csp_flags != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		default:
			if (!swcr_cipher_supported(csp))
				return (EINVAL);
			break;
		}
		break;
	case CSP_MODE_DIGEST:
		if (!swcr_auth_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		/* AEAD algorithms cannot be used for EtA. */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			return (EINVAL);
		}

		if (!swcr_cipher_supported(csp) ||
		    !swcr_auth_supported(csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_SOFTWARE);
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct swcr_session *ses;
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct comp_algo *cxf;
	int error;

	ses = crypto_get_driver_session(cses);
	mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);

	error = 0;
	swe = &ses->swcr_encdec;
	swa = &ses->swcr_auth;
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			break;
#ifdef INVARIANTS
		default:
			panic("bad compression algo");
#endif
		}
		ses->swcr_compdec.sw_cxf = cxf;
		ses->swcr_process = swcr_compdec;
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_NULL_CBC:
			ses->swcr_process = swcr_null;
			break;
#ifdef INVARIANTS
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad cipher algo");
#endif
		default:
			error = swcr_setup_encdec(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_encdec;
		}
		break;
	case CSP_MODE_DIGEST:
		error = swcr_setup_auth(ses, csp);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			error = swcr_setup_gcm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_gcm;
			break;
		case CRYPTO_AES_CCM_16:
			error = swcr_setup_ccm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_ccm;
			break;
#ifdef INVARIANTS
		default:
			panic("bad aead algo");
#endif
		}
		break;
	case CSP_MODE_ETA:
#ifdef INVARIANTS
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad eta cipher algo");
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			panic("bad eta auth algo");
		}
#endif

		error = swcr_setup_auth(ses, csp);
		if (error)
			break;
		if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
			/* Effectively degrade to digest mode. */
			ses->swcr_process = swcr_authcompute;
			break;
		}

		error = swcr_setup_encdec(ses, csp);
		if (error == 0)
			ses->swcr_process = swcr_eta;
		break;
	default:
		error = EINVAL;
	}

	if (error)
		swcr_freesession(dev, cses);
	return (error);
}

static void
swcr_freesession(device_t dev, crypto_session_t cses)
{
	struct swcr_session *ses;
	struct swcr_auth *swa;
	struct enc_xform *txf;
	struct auth_hash *axf;

	ses = crypto_get_driver_session(cses);

	mtx_destroy(&ses->swcr_lock);

	txf = ses->swcr_encdec.sw_exf;
	if (txf != NULL) {
		if (ses->swcr_encdec.sw_kschedule != NULL)
			txf->zerokey(&(ses->swcr_encdec.sw_kschedule));
	}

	axf = ses->swcr_auth.sw_axf;
	if (axf != NULL) {
		swa = &ses->swcr_auth;
		if (swa->sw_ictx != NULL) {
			explicit_bzero(swa->sw_ictx, axf->ctxsize);
			free(swa->sw_ictx, M_CRYPTO_DATA);
		}
		if (swa->sw_octx != NULL) {
			explicit_bzero(swa->sw_octx, axf->ctxsize);
			free(swa->sw_octx, M_CRYPTO_DATA);
		}
	}
}

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(crp->crp_session);
	mtx_lock(&ses->swcr_lock);

	crp->crp_etype = ses->swcr_process(ses, crp);

	mtx_unlock(&ses->swcr_lock);
	crypto_done(crp);
	return (0);
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{

	swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return (ENXIO);
	}

	return (0);
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	return (0);
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_probesession, swcr_probesession),
	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
1431