/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>
#include <sys/mutex.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>
#include <sys/md5.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

struct swcr_auth {
	void		*sw_ictx;
	void		*sw_octx;
	struct auth_hash *sw_axf;
	uint16_t	sw_mlen;
	uint16_t	sw_octx_len;
};

struct swcr_encdec {
	uint8_t		*sw_kschedule;
	struct enc_xform *sw_exf;
};

struct swcr_compdec {
	struct comp_algo *sw_cxf;
};

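/*
 * Per-session state.  swcr_process points at the handler selected in
 * swcr_newsession() for the session's mode (cipher, digest, AEAD,
 * EtA or compression); swcr_process() below dispatches through it
 * under swcr_lock.
 */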
struct swcr_session {
	struct mtx	swcr_lock;
	int	(*swcr_process)(struct swcr_session *, struct cryptop *);

	struct swcr_auth swcr_auth;
	struct swcr_encdec swcr_encdec;
	struct swcr_compdec swcr_compdec;
};

static	int32_t swcr_id;

static	void swcr_freesession(device_t dev, crypto_session_t cses);

/* Used for CRYPTO_NULL_CBC. */
static int
swcr_null(struct swcr_session *ses, struct cryptop *crp)
{

	return (0);
}

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	const struct crypto_session_params *csp;
	struct swcr_encdec *sw;
	struct enc_xform *exf;
	int i, j, k, blks, ind, count, ivlen;
	struct uio *uio, uiolcl;
	struct iovec iovlcl[4];
	struct iovec *iov;
	int iovcnt, iovalloc;
	int error;
	bool encrypting;

	error = 0;

	sw = &ses->swcr_encdec;
	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Reject payloads that are not a multiple of the cipher block size. */
	if ((crp->crp_payload_length % blks) != 0)
		return EINVAL;

	if (exf == &enc_xform_aes_icm &&
	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* IV explicitly provided? */
	if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
		bcopy(crp->crp_iv, iv, ivlen);
	else if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
		arc4rand(iv, ivlen, 0);
		crypto_copyback(crp, crp->crp_iv_start, ivlen, iv);
	} else
		crypto_copydata(crp, crp->crp_iv_start, ivlen, iv);

	if (crp->crp_cipher_key != NULL) {
		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));

		csp = crypto_get_params(crp->crp_session);
		error = exf->setkey(&sw->sw_kschedule,
		    crp->crp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}

	iov = iovlcl;
	iovcnt = nitems(iovlcl);
	iovalloc = 0;
	uio = &uiolcl;
	switch (crp->crp_buf_type) {
	case CRYPTO_BUF_MBUF:
		error = crypto_mbuftoiov(crp->crp_mbuf, &iov, &iovcnt,
		    &iovalloc);
		if (error)
			return (error);
		uio->uio_iov = iov;
		uio->uio_iovcnt = iovcnt;
		break;
	case CRYPTO_BUF_UIO:
		uio = crp->crp_uio;
		break;
	case CRYPTO_BUF_CONTIG:
		iov[0].iov_base = crp->crp_buf;
		iov[0].iov_len = crp->crp_ilen;
		uio->uio_iov = iov;
		uio->uio_iovcnt = 1;
		break;
	}

	ivp = iv;

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(sw->sw_kschedule, iv);
	}

	count = crp->crp_payload_start;
	ind = cuio_getptr(uio, count, &k);
	if (ind == -1) {
		error = EINVAL;
		goto out;
	}

	i = crp->crp_payload_length;
	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);

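	/*
	 * For xforms without a reinit hook, the loop below implements
	 * plain CBC chaining:
	 *
	 *	encrypt: C[i] = E_k(P[i] ^ C[i-1]), with C[0] = E_k(P[0] ^ IV)
	 *	decrypt: P[i] = D_k(C[i]) ^ C[i-1]
	 *
	 * "ivp" always points at the previous ciphertext block (or at
	 * the IV for the first block).  On decryption the incoming
	 * ciphertext block is staged in iv/iv2 before it is overwritten
	 * in place, which is why two IV buffers are needed.
	 */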
	while (i > 0) {
		/*
		 * If there's insufficient data at the end of
		 * an iovec, we have to do some copying.
		 */
		if (uio->uio_iov[ind].iov_len < k + blks &&
		    uio->uio_iov[ind].iov_len != k) {
			cuio_copydata(uio, count, blks, blk);

			/* Actual encryption/decryption */
			if (exf->reinit) {
				if (encrypting) {
					exf->encrypt(sw->sw_kschedule,
					    blk);
				} else {
					exf->decrypt(sw->sw_kschedule,
					    blk);
				}
			} else if (encrypting) {
				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, blk);

				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				bcopy(blk, iv, blks);
				ivp = iv;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(blk, nivp, blks);

				exf->decrypt(sw->sw_kschedule, blk);

				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				ivp = nivp;
			}

			/* Copy back the processed block */
			cuio_copyback(uio, count, blks, blk);

			count += blks;

			/* Advance pointer */
			ind = cuio_getptr(uio, count, &k);
			if (ind == -1) {
				error = EINVAL;
				goto out;
			}

			i -= blks;

			/* Could be done... */
			if (i == 0)
				break;
		}

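		/*
		 * Fast path: process blocks in place while they are
		 * contiguous in the current iovec.  Stream-type xforms
		 * (those with a reinit hook) may provide
		 * encrypt_multi/decrypt_multi, which handle
		 * rounddown(rem, blks) bytes per call instead of a
		 * single block.
		 */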
		while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) {
			uint8_t *idat;
			size_t nb, rem;

			nb = blks;
			rem = MIN((size_t)i,
			    uio->uio_iov[ind].iov_len - (size_t)k);
			idat = (uint8_t *)uio->uio_iov[ind].iov_base + k;

			if (exf->reinit) {
				if (encrypting && exf->encrypt_multi == NULL)
					exf->encrypt(sw->sw_kschedule,
					    idat);
				else if (encrypting) {
					nb = rounddown(rem, blks);
					exf->encrypt_multi(sw->sw_kschedule,
					    idat, nb);
				} else if (exf->decrypt_multi == NULL)
					exf->decrypt(sw->sw_kschedule,
					    idat);
				else {
					nb = rounddown(rem, blks);
					exf->decrypt_multi(sw->sw_kschedule,
					    idat, nb);
				}
			} else if (encrypting) {
				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, idat);
				ivp = idat;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block to be used
				 * in next block's processing.
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(idat, nivp, blks);

				exf->decrypt(sw->sw_kschedule, idat);

				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				ivp = nivp;
			}

			count += nb;
			k += nb;
			i -= nb;
		}

		/*
		 * Advance to the next iov if the end of the current iov
		 * is aligned with the end of a cipher block.
		 * Note that the code is equivalent to calling:
		 *      ind = cuio_getptr(uio, count, &k);
		 */
		if (i > 0 && k == uio->uio_iov[ind].iov_len) {
			k = 0;
			ind++;
			if (ind >= uio->uio_iovcnt) {
				error = EINVAL;
				goto out;
			}
		}
	}

out:
	if (iovalloc)
		free(iov, M_CRYPTO_DATA);

	return (error);
}

static void
swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw,
    const uint8_t *key, int klen)
{

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
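		/*
		 * Standard HMAC: H((K ^ opad) || H((K ^ ipad) || m)).
		 * Precompute inner and outer contexts with the padded
		 * key already absorbed so that each request only has
		 * to hash the message and the inner digest.
		 */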
		hmac_init_ipad(axf, key, klen, sw->sw_ictx);
		hmac_init_opad(axf, key, klen, sw->sw_octx);
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/*
		 * We need a buffer big enough to hold an MD5 or SHA1
		 * result, only to throw it away.
		 * What we do here is the initial part of:
		 *   ALGO( key, keyfill, .. )
		 * feeding the key into sw_ictx and abusing Final() to
		 * obtain the "keyfill" padding.
		 * We also stash the key in sw_octx so that
		 * swcr_authcompute() can append it at the end.
		 */
		u_char buf[SHA1_RESULTLEN];

		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		axf->Setkey(sw->sw_ictx, key, klen);
		axf->Init(sw->sw_ictx);
		break;
	default:
		panic("%s: algorithm %d doesn't use keys", __func__, axf->type);
	}
}

/*
 * Compute or verify hash.
 */
static int
swcr_authcompute(struct swcr_session *ses, struct cryptop *crp)
{
	u_char aalg[HASH_MAX_LEN];
	u_char uaalg[HASH_MAX_LEN];
	const struct crypto_session_params *csp;
	struct swcr_auth *sw;
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	sw = &ses->swcr_auth;

	axf = sw->sw_axf;

	if (crp->crp_auth_key != NULL) {
		csp = crypto_get_params(crp->crp_session);
		swcr_authprepare(axf, sw, crp->crp_auth_key,
		    csp->csp_auth_klen);
	}

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
	    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
	if (err)
		return err;

	err = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length,
	    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
	if (err)
		return err;

	switch (axf->type) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

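		/*
		 * Outer HMAC pass: aalg holds the inner digest
		 * H((K ^ ipad) || m); run it through the saved opad
		 * context to produce the final tag.
		 */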
		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		/* If we have no key saved, return error. */
		if (sw->sw_octx == NULL)
			return EINVAL;

		/*
		 * Add the trailing copy of the key (see comment in
		 * swcr_authprepare()) after the data:
		 *   ALGO( .., key, algofill )
		 * and let Final() do the proper, natural "algofill"
		 * padding.
		 */
		axf->Update(&ctx, sw->sw_octx, sw->sw_octx_len);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_POLY1305:
		axf->Final(aalg, &ctx);
		break;
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
		if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
	}
	return (0);
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */
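/*
 * These compile-time checks ensure that an int-sized request length
 * cannot exceed GCM's limits on plaintext and associated-data sizes.
 */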

static int
swcr_gmac(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	uint32_t *blkp;
	int blksz, i, ivlen, len;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	if (crp->crp_flags & CRYPTO_F_IV_GENERATE)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
		bcopy(crp->crp_iv, iv, ivlen);
	else
		crypto_copydata(crp, crp->crp_iv_start, ivlen, iv);

	axf->Reinit(&ctx, iv, ivlen);
	for (i = 0; i < crp->crp_payload_length; i += blksz) {
		len = MIN(crp->crp_payload_length - i, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

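	/*
	 * GMAC authenticates the payload as AAD, so the bit count below
	 * lands in the AAD half of the final GHASH length block.
	 */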
	/* length block */
	bzero(blk, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);
		if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
	}
	return (0);
}

static int
swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	uint32_t *blkp;
	int blksz, i, ivlen, len, r;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crp->crp_aad_length; i += blksz) {
		len = MIN(crp->crp_aad_length - i, blksz);
		crypto_copydata(crp, crp->crp_aad_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption with MAC */
	for (i = 0; i < crp->crp_payload_length; i += len) {
		len = MIN(crp->crp_payload_length - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		} else {
			axf->Update(&ctx, blk, len);
		}
	}

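	/*
	 * Finalize GHASH with the length block: len(AAD) and
	 * len(ciphertext) as two 64-bit big-endian bit counts, of which
	 * only the low 32 bits are stored here.
	 */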
	/* length block */
	bzero(blk, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_aad_length * 8);
	blkp = (uint32_t *)blk + 3;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);

		r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
		if (r != 0)
			return (EBADMSG);

		/* tag matches, decrypt data */
		for (i = 0; i < crp->crp_payload_length; i += blksz) {
			len = MIN(crp->crp_payload_length - i, blksz);
			if (len < blksz)
				bzero(blk, blksz);
			crypto_copydata(crp, crp->crp_payload_start + i, len,
			    blk);
			exf->decrypt(swe->sw_kschedule, blk);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    aalg);
	}

	return (0);
}

static int
swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	int blksz, i, ivlen, len;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	if (crp->crp_flags & CRYPTO_F_IV_GENERATE)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
		bcopy(crp->crp_iv, iv, ivlen);
	else
		crypto_copydata(crp, crp->crp_iv_start, ivlen, iv);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = 0;
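	/*
	 * A standalone CBC-MAC request presents the data to be
	 * authenticated as payload; from CCM's perspective it is all
	 * auth data and there is no ciphertext, hence the lengths above.
	 */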

	axf->Reinit(&ctx, iv, ivlen);
	for (i = 0; i < crp->crp_payload_length; i += blksz) {
		len = MIN(crp->crp_payload_length - i, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);
		if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
	}
	return (0);
}

static int
swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	int blksz, i, ivlen, len, r;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	for (i = 0; i < crp->crp_aad_length; i += blksz) {
		len = MIN(crp->crp_aad_length - i, blksz);
		crypto_copydata(crp, crp->crp_aad_start + i, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crp->crp_payload_length; i += len) {
		len = MIN(crp->crp_payload_length - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			axf->Update(&ctx, blk, len);
			exf->encrypt(swe->sw_kschedule, blk);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		} else {
			/*
			 * One of the problems with CCM + CBC-MAC is
			 * that the authentication is done over the
			 * unencrypted data.  As a result, we have to
			 * decrypt the data twice: once to generate the
			 * tag and a second time after the tag is
			 * verified.
			 */
			exf->decrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
		}
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);

		r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
		if (r != 0)
			return (EBADMSG);

		/* tag matches, decrypt data */
		exf->reinit(swe->sw_kschedule, iv);
		for (i = 0; i < crp->crp_payload_length; i += blksz) {
			len = MIN(crp->crp_payload_length - i, blksz);
			if (len < blksz)
				bzero(blk, blksz);
			crypto_copydata(crp, crp->crp_payload_start + i, len,
			    blk);
			exf->decrypt(swe->sw_kschedule, blk);
			crypto_copyback(crp, crp->crp_payload_start + i, len,
			    blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    aalg);
	}

	return (0);
}

/*
 * Apply a cipher and a digest to perform EtA (encrypt-then-
 * authenticate): on encryption the payload is encrypted first and the
 * digest is computed over the ciphertext; on decryption the digest is
 * verified before the ciphertext is decrypted, so a forgery is
 * rejected without touching the plaintext.
 */
static int
swcr_eta(struct swcr_session *ses, struct cryptop *crp)
{
	int error;

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		error = swcr_encdec(ses, crp);
		if (error == 0)
			error = swcr_authcompute(ses, crp);
	} else {
		error = swcr_authcompute(ses, crp);
		if (error == 0)
			error = swcr_encdec(ses, crp);
	}
	return (error);
}

/*
 * Apply a compression/decompression algorithm.
 */
static int
swcr_compdec(struct swcr_session *ses, struct cryptop *crp)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = ses->swcr_compdec.sw_cxf;

	/*
	 * The (de)compression routines need the whole payload in one
	 * contiguous buffer, so copy the data out of the request into
	 * a local buffer first.
	 */

	data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
	    data);

	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
		result = cxf->compress(data, crp->crp_payload_length, &out);
	else
		result = cxf->decompress(data, crp->crp_payload_length, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return (EINVAL);
	crp->crp_olen = result;

	/* Check the compressed size when doing compression */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
		if (result >= crp->crp_payload_length) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return (0);
		}
	}

	/*
	 * Copy back the (de)compressed data.  m_copyback() extends the
	 * mbuf as necessary.
	 */
	crypto_copyback(crp, crp->crp_payload_start, result, out);
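	/*
	 * If the result is shorter than the original payload, shrink
	 * the buffer to match: m_adj() with a negative count trims from
	 * the tail of an mbuf chain; for a uio the trailing iovecs are
	 * shortened or dropped.
	 */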
	if (result < crp->crp_payload_length) {
		switch (crp->crp_buf_type) {
		case CRYPTO_BUF_MBUF:
			adj = result - crp->crp_payload_length;
			m_adj(crp->crp_mbuf, adj);
			break;
		case CRYPTO_BUF_UIO: {
			struct uio *uio = crp->crp_uio;
			int ind;

			adj = crp->crp_payload_length - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
			}
			break;
		}
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}

static int
swcr_setup_encdec(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	struct enc_xform *txf;
	int error;

	swe = &ses->swcr_encdec;
	txf = crypto_cipher(csp);
	MPASS(txf->ivsize == csp->csp_ivlen);
	if (csp->csp_cipher_key != NULL) {
		error = txf->setkey(&swe->sw_kschedule,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;
	return (0);
}

static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	swa = &ses->swcr_auth;

	axf = crypto_auth_hash(csp);
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);

	switch (csp->csp_auth_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		swa->sw_octx_len = axf->ctxsize;
		swa->sw_octx = malloc(swa->sw_octx_len, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_octx == NULL)
			return (ENOBUFS);

		if (csp->csp_auth_key != NULL) {
			swcr_authprepare(axf, swa, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}

		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		swa->sw_octx_len = csp->csp_auth_klen;
		swa->sw_octx = malloc(swa->sw_octx_len, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_octx == NULL)
			return (ENOBUFS);

		/* Store the key so we can "append" it to the payload */
		if (csp->csp_auth_key != NULL) {
			swcr_authprepare(axf, swa, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}

		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
#ifdef notdef
	case CRYPTO_MD5:
#endif
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_NIST_GMAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_gmac;
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		/*
		 * Blake2b and Blake2s support an optional key but do
		 * not require one.
		 */
		if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL)
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_ccm_cbc_mac;
		break;
	}

	return (0);
}

static int
swcr_setup_gcm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct enc_xform *txf;
	struct auth_hash *axf;
	int error;

	if (csp->csp_ivlen != AES_GCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_nist_gmac_aes_128;
		break;
	case 192:
		axf = &auth_hash_nist_gmac_aes_192;
		break;
	case 256:
		axf = &auth_hash_nist_gmac_aes_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	swe = &ses->swcr_encdec;
	txf = &enc_xform_aes_nist_gcm;
	if (csp->csp_cipher_key != NULL) {
		error = txf->setkey(&swe->sw_kschedule,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;

	return (0);
}

static int
swcr_setup_ccm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct enc_xform *txf;
	struct auth_hash *axf;
	int error;

	if (csp->csp_ivlen != AES_CCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_ccm_cbc_mac_128;
		break;
	case 192:
		axf = &auth_hash_ccm_cbc_mac_192;
		break;
	case 256:
		axf = &auth_hash_ccm_cbc_mac_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	swe = &ses->swcr_encdec;
	txf = &enc_xform_ccm;
	if (csp->csp_cipher_key != NULL) {
		error = txf->setkey(&swe->sw_kschedule,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;

	return (0);
}

static bool
swcr_auth_supported(const struct crypto_session_params *csp)
{
	struct auth_hash *axf;

	axf = crypto_auth_hash(csp);
	if (axf == NULL)
		return (false);
	switch (csp->csp_auth_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		break;
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_GCM_IV_LEN)
			return (false);
		break;
	case CRYPTO_POLY1305:
		if (csp->csp_auth_klen != POLY1305_KEY_LEN)
			return (false);
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_CCM_IV_LEN)
			return (false);
		break;
	}
	return (true);
}

static bool
swcr_cipher_supported(const struct crypto_session_params *csp)
{
	struct enc_xform *txf;

	txf = crypto_cipher(csp);
	if (txf == NULL)
		return (false);
	if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
	    txf->ivsize != csp->csp_ivlen)
		return (false);
	return (true);
}

static int
swcr_probesession(device_t dev, const struct crypto_session_params *csp)
{

	if (csp->csp_flags != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		default:
			if (!swcr_cipher_supported(csp))
				return (EINVAL);
			break;
		}
		break;
	case CSP_MODE_DIGEST:
		if (!swcr_auth_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		/* AEAD algorithms cannot be used for EtA. */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			return (EINVAL);
		}

		if (!swcr_cipher_supported(csp) ||
		    !swcr_auth_supported(csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_SOFTWARE);
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct swcr_session *ses;
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct comp_algo *cxf;
	int error;

	ses = crypto_get_driver_session(cses);
	mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);

	error = 0;
	swe = &ses->swcr_encdec;
	swa = &ses->swcr_auth;
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			break;
#ifdef INVARIANTS
		default:
			panic("bad compression algo");
#endif
		}
		ses->swcr_compdec.sw_cxf = cxf;
		ses->swcr_process = swcr_compdec;
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_NULL_CBC:
			ses->swcr_process = swcr_null;
			break;
#ifdef INVARIANTS
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad cipher algo");
#endif
		default:
			error = swcr_setup_encdec(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_encdec;
		}
		break;
	case CSP_MODE_DIGEST:
		error = swcr_setup_auth(ses, csp);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			error = swcr_setup_gcm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_gcm;
			break;
		case CRYPTO_AES_CCM_16:
			error = swcr_setup_ccm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_ccm;
			break;
#ifdef INVARIANTS
		default:
			panic("bad aead algo");
#endif
		}
		break;
	case CSP_MODE_ETA:
#ifdef INVARIANTS
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad eta cipher algo");
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			panic("bad eta auth algo");
		}
#endif

		error = swcr_setup_auth(ses, csp);
		if (error)
			break;
		if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
			/* Effectively degrade to digest mode. */
			ses->swcr_process = swcr_authcompute;
			break;
		}

		error = swcr_setup_encdec(ses, csp);
		if (error == 0)
			ses->swcr_process = swcr_eta;
		break;
	default:
		error = EINVAL;
	}

	if (error)
		swcr_freesession(dev, cses);
	return (error);
}

static void
swcr_freesession(device_t dev, crypto_session_t cses)
{
	struct swcr_session *ses;
	struct swcr_auth *swa;
	struct enc_xform *txf;
	struct auth_hash *axf;

	ses = crypto_get_driver_session(cses);

	mtx_destroy(&ses->swcr_lock);

	txf = ses->swcr_encdec.sw_exf;
	if (txf != NULL) {
		if (ses->swcr_encdec.sw_kschedule != NULL)
			txf->zerokey(&(ses->swcr_encdec.sw_kschedule));
	}

	axf = ses->swcr_auth.sw_axf;
	if (axf != NULL) {
		swa = &ses->swcr_auth;
		if (swa->sw_ictx != NULL) {
			explicit_bzero(swa->sw_ictx, axf->ctxsize);
			free(swa->sw_ictx, M_CRYPTO_DATA);
		}
		if (swa->sw_octx != NULL) {
			explicit_bzero(swa->sw_octx, swa->sw_octx_len);
			free(swa->sw_octx, M_CRYPTO_DATA);
		}
	}
}

/*
 * Process a software request: dispatch to the handler installed at
 * session setup, serialized per session by swcr_lock, and complete it
 * synchronously via crypto_done().
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(crp->crp_session);
	mtx_lock(&ses->swcr_lock);

	crp->crp_etype = ses->swcr_process(ses, crp);

	mtx_unlock(&ses->swcr_lock);
	crypto_done(crp);
	return (0);
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{

	swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return (ENXIO);
	}

	return (0);
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	return 0;
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_probesession, swcr_probesession),
	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);