xref: /freebsd/sys/opencrypto/cryptosoft.c (revision 7a33c92b43a06d7f783d5958f20fbfe334d1776e)
1 /*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/
2 
3 /*-
4  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
5  * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
6  *
7  * This code was written by Angelos D. Keromytis in Athens, Greece, in
8  * February 2000. Network Security Technologies Inc. (NSTI) kindly
9  * supported the development of this code.
10  *
11  * Copyright (c) 2000, 2001 Angelos D. Keromytis
12  * Copyright (c) 2014 The FreeBSD Foundation
13  * All rights reserved.
14  *
15  * Portions of this software were developed by John-Mark Gurney
16  * under sponsorship of the FreeBSD Foundation and
17  * Rubicon Communications, LLC (Netgate).
18  *
19  * Permission to use, copy, and modify this software with or without fee
20  * is hereby granted, provided that this entire notice is included in
21  * all source code copies of any software which is or includes a copy or
22  * modification of this software.
23  *
24  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
25  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
26  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
27  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
28  * PURPOSE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/module.h>
39 #include <sys/sysctl.h>
40 #include <sys/errno.h>
41 #include <sys/random.h>
42 #include <sys/kernel.h>
43 #include <sys/uio.h>
44 #include <sys/lock.h>
45 #include <sys/rwlock.h>
46 #include <sys/endian.h>
47 #include <sys/limits.h>
48 #include <sys/mutex.h>
49 
50 #include <crypto/sha1.h>
51 #include <opencrypto/rmd160.h>
52 
53 #include <opencrypto/cryptodev.h>
54 #include <opencrypto/xform.h>
55 
56 #include <sys/kobj.h>
57 #include <sys/bus.h>
58 #include "cryptodev_if.h"
59 
/* Per-session state for a software auth/digest transform. */
struct swcr_auth {
	void		*sw_ictx;	/* inner hash context (ipad state for HMACs) */
	void		*sw_octx;	/* outer hash context (opad state); HMAC only */
	struct auth_hash *sw_axf;	/* auth transform descriptor */
	uint16_t	sw_mlen;	/* digest length copied back or verified */
};
66 
/* Per-session state for a software cipher transform. */
struct swcr_encdec {
	void		*sw_kschedule;	/* expanded key schedule (exf->ctxsize bytes) */
	struct enc_xform *sw_exf;	/* cipher transform descriptor */
};
71 
/* Per-session state for a software compression transform. */
struct swcr_compdec {
	struct comp_algo *sw_cxf;	/* compression transform descriptor */
};
75 
/*
 * A software crypto session.  swcr_process points at the handler
 * selected at session-setup time (swcr_encdec, swcr_authcompute,
 * swcr_gcm, ...) so request dispatch is a single indirect call.
 */
struct swcr_session {
	struct mtx	swcr_lock;	/* NOTE(review): lock usage not visible in this chunk */
	int	(*swcr_process)(struct swcr_session *, struct cryptop *);

	struct swcr_auth swcr_auth;
	struct swcr_encdec swcr_encdec;
	struct swcr_compdec swcr_compdec;
};
84 
85 static	int32_t swcr_id;
86 
87 static	void swcr_freesession(device_t dev, crypto_session_t cses);
88 
/* Used for CRYPTO_NULL_CBC. */
static int
swcr_null(struct swcr_session *ses, struct cryptop *crp)
{

	/* Null cipher: the payload passes through untouched. */
	return (0);
}
96 
97 /*
98  * Apply a symmetric encryption/decryption algorithm.
99  */
100 static int
101 swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
102 {
103 	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
104 	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
105 	const struct crypto_session_params *csp;
106 	struct swcr_encdec *sw;
107 	struct enc_xform *exf;
108 	int i, blks, inlen, ivlen, outlen, resid;
109 	struct crypto_buffer_cursor cc_in, cc_out;
110 	const char *inblk;
111 	char *outblk;
112 	int error;
113 	bool encrypting;
114 
115 	error = 0;
116 
117 	sw = &ses->swcr_encdec;
118 	exf = sw->sw_exf;
119 	ivlen = exf->ivsize;
120 
121 	if (exf->native_blocksize == 0) {
122 		/* Check for non-padded data */
123 		if ((crp->crp_payload_length % exf->blocksize) != 0)
124 			return (EINVAL);
125 
126 		blks = exf->blocksize;
127 	} else
128 		blks = exf->native_blocksize;
129 
130 	if (exf == &enc_xform_aes_icm &&
131 	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
132 		return (EINVAL);
133 
134 	if (crp->crp_cipher_key != NULL) {
135 		csp = crypto_get_params(crp->crp_session);
136 		error = exf->setkey(sw->sw_kschedule,
137 		    crp->crp_cipher_key, csp->csp_cipher_klen);
138 		if (error)
139 			return (error);
140 	}
141 
142 	crypto_read_iv(crp, iv);
143 
144 	if (exf->reinit) {
145 		/*
146 		 * xforms that provide a reinit method perform all IV
147 		 * handling themselves.
148 		 */
149 		exf->reinit(sw->sw_kschedule, iv);
150 	}
151 
152 	ivp = iv;
153 
154 	crypto_cursor_init(&cc_in, &crp->crp_buf);
155 	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
156 	inlen = crypto_cursor_seglen(&cc_in);
157 	inblk = crypto_cursor_segbase(&cc_in);
158 	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
159 		crypto_cursor_init(&cc_out, &crp->crp_obuf);
160 		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
161 	} else
162 		cc_out = cc_in;
163 	outlen = crypto_cursor_seglen(&cc_out);
164 	outblk = crypto_cursor_segbase(&cc_out);
165 
166 	resid = crp->crp_payload_length;
167 	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);
168 
169 	/*
170 	 * Loop through encrypting blocks.  'inlen' is the remaining
171 	 * length of the current segment in the input buffer.
172 	 * 'outlen' is the remaining length of current segment in the
173 	 * output buffer.
174 	 */
175 	while (resid >= blks) {
176 		/*
177 		 * If the current block is not contained within the
178 		 * current input/output segment, use 'blk' as a local
179 		 * buffer.
180 		 */
181 		if (inlen < blks) {
182 			crypto_cursor_copydata(&cc_in, blks, blk);
183 			inblk = blk;
184 		}
185 		if (outlen < blks)
186 			outblk = blk;
187 
188 		/*
189 		 * Ciphers without a 'reinit' hook are assumed to be
190 		 * used in CBC mode where the chaining is done here.
191 		 */
192 		if (exf->reinit != NULL) {
193 			if (encrypting)
194 				exf->encrypt(sw->sw_kschedule, inblk, outblk);
195 			else
196 				exf->decrypt(sw->sw_kschedule, inblk, outblk);
197 		} else if (encrypting) {
198 			/* XOR with previous block */
199 			for (i = 0; i < blks; i++)
200 				outblk[i] = inblk[i] ^ ivp[i];
201 
202 			exf->encrypt(sw->sw_kschedule, outblk, outblk);
203 
204 			/*
205 			 * Keep encrypted block for XOR'ing
206 			 * with next block
207 			 */
208 			memcpy(iv, outblk, blks);
209 			ivp = iv;
210 		} else {	/* decrypt */
211 			/*
212 			 * Keep encrypted block for XOR'ing
213 			 * with next block
214 			 */
215 			nivp = (ivp == iv) ? iv2 : iv;
216 			memcpy(nivp, inblk, blks);
217 
218 			exf->decrypt(sw->sw_kschedule, inblk, outblk);
219 
220 			/* XOR with previous block */
221 			for (i = 0; i < blks; i++)
222 				outblk[i] ^= ivp[i];
223 
224 			ivp = nivp;
225 		}
226 
227 		if (inlen < blks) {
228 			inlen = crypto_cursor_seglen(&cc_in);
229 			inblk = crypto_cursor_segbase(&cc_in);
230 		} else {
231 			crypto_cursor_advance(&cc_in, blks);
232 			inlen -= blks;
233 			inblk += blks;
234 		}
235 
236 		if (outlen < blks) {
237 			crypto_cursor_copyback(&cc_out, blks, blk);
238 			outlen = crypto_cursor_seglen(&cc_out);
239 			outblk = crypto_cursor_segbase(&cc_out);
240 		} else {
241 			crypto_cursor_advance(&cc_out, blks);
242 			outlen -= blks;
243 			outblk += blks;
244 		}
245 
246 		resid -= blks;
247 	}
248 
249 	/* Handle trailing partial block for stream ciphers. */
250 	if (resid > 0) {
251 		KASSERT(exf->native_blocksize != 0,
252 		    ("%s: partial block of %d bytes for cipher %s",
253 		    __func__, i, exf->name));
254 		KASSERT(exf->reinit != NULL,
255 		    ("%s: partial block cipher %s without reinit hook",
256 		    __func__, exf->name));
257 		KASSERT(resid < blks, ("%s: partial block too big", __func__));
258 
259 		inlen = crypto_cursor_seglen(&cc_in);
260 		outlen = crypto_cursor_seglen(&cc_out);
261 		if (inlen < resid) {
262 			crypto_cursor_copydata(&cc_in, resid, blk);
263 			inblk = blk;
264 		} else
265 			inblk = crypto_cursor_segbase(&cc_in);
266 		if (outlen < resid)
267 			outblk = blk;
268 		else
269 			outblk = crypto_cursor_segbase(&cc_out);
270 		if (encrypting)
271 			exf->encrypt_last(sw->sw_kschedule, inblk, outblk,
272 			    resid);
273 		else
274 			exf->decrypt_last(sw->sw_kschedule, inblk, outblk,
275 			    resid);
276 		if (outlen < resid)
277 			crypto_cursor_copyback(&cc_out, resid, blk);
278 	}
279 
280 	explicit_bzero(blk, sizeof(blk));
281 	explicit_bzero(iv, sizeof(iv));
282 	explicit_bzero(iv2, sizeof(iv2));
283 	return (0);
284 }
285 
286 static void
287 swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw,
288     const uint8_t *key, int klen)
289 {
290 
291 	switch (axf->type) {
292 	case CRYPTO_SHA1_HMAC:
293 	case CRYPTO_SHA2_224_HMAC:
294 	case CRYPTO_SHA2_256_HMAC:
295 	case CRYPTO_SHA2_384_HMAC:
296 	case CRYPTO_SHA2_512_HMAC:
297 	case CRYPTO_NULL_HMAC:
298 	case CRYPTO_RIPEMD160_HMAC:
299 		hmac_init_ipad(axf, key, klen, sw->sw_ictx);
300 		hmac_init_opad(axf, key, klen, sw->sw_octx);
301 		break;
302 	case CRYPTO_POLY1305:
303 	case CRYPTO_BLAKE2B:
304 	case CRYPTO_BLAKE2S:
305 		axf->Setkey(sw->sw_ictx, key, klen);
306 		axf->Init(sw->sw_ictx);
307 		break;
308 	default:
309 		panic("%s: algorithm %d doesn't use keys", __func__, axf->type);
310 	}
311 }
312 
313 /*
314  * Compute or verify hash.
315  */
static int
swcr_authcompute(struct swcr_session *ses, struct cryptop *crp)
{
	u_char aalg[HASH_MAX_LEN];
	const struct crypto_session_params *csp;
	struct swcr_auth *sw;
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	sw = &ses->swcr_auth;

	axf = sw->sw_axf;

	/* A per-request key overrides the session key. */
	if (crp->crp_auth_key != NULL) {
		csp = crypto_get_params(crp->crp_session);
		swcr_authprepare(axf, sw, crp->crp_auth_key,
		    csp->csp_auth_klen);
	}

	/* Start from the precomputed inner context. */
	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	/* Hash the AAD region first. */
	err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
	    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
	if (err)
		return err;

	/*
	 * For EtA with a separate output buffer, the ciphertext to be
	 * hashed lives in the output buffer after encryption; otherwise
	 * hash the payload in place.
	 */
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
	    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		err = crypto_apply_buf(&crp->crp_obuf,
		    crp->crp_payload_output_start, crp->crp_payload_length,
		    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
	else
		err = crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length,
		    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
	if (err)
		return err;

	switch (axf->type) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		/* HMAC: finish inner hash, then run it through the
		 * precomputed outer (opad) context. */
		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_POLY1305:
		axf->Final(aalg, &ctx);
		break;
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char uaalg[HASH_MAX_LEN];

		/* Constant-time compare against the supplied digest. */
		crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
		if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
			err = EBADMSG;
		explicit_bzero(uaalg, sizeof(uaalg));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
	}
	explicit_bzero(aalg, sizeof(aalg));
	return (err);
}
401 
402 CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
403 CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */
404 
/*
 * Compute or verify a standalone AES-GMAC digest over the payload.
 * The data to authenticate is supplied as the request payload.
 */
static int
swcr_gmac(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	struct crypto_buffer_cursor cc;
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	uint32_t *blkp;
	int blksz, error, ivlen, len, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	crypto_read_iv(crp, iv);

	axf->Reinit(&ctx, iv, ivlen);
	crypto_cursor_init(&cc, &crp->crp_buf);
	crypto_cursor_advance(&cc, crp->crp_payload_start);
	/* GHASH the payload a block at a time, zero-padding the tail. */
	for (resid = crp->crp_payload_length; resid > 0; resid -= len) {
		len = MIN(resid, blksz);
		crypto_cursor_copydata(&cc, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	/* length block */
	bzero(blk, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	error = 0;
	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char uaalg[AALG_MAX_RESULT_LEN];

		/* Constant-time compare against the supplied tag. */
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);
		if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
			error = EBADMSG;
		explicit_bzero(uaalg, sizeof(uaalg));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
	}
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(aalg, sizeof(aalg));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}
466 
/*
 * AES-GCM AEAD.  On encrypt: encrypt payload, GHASH ciphertext, emit
 * tag.  On decrypt: GHASH ciphertext first, verify the tag, and only
 * then decrypt in a second pass so no plaintext is released for a
 * forged message.
 */
static int
swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	uint32_t *blkp;
	int blksz, error, ivlen, len, r, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	KASSERT(axf->blocksize == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	/* GCM requires a separately-supplied per-request IV. */
	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_aad_start);
	for (resid = crp->crp_aad_length; resid > 0; resid -= len) {
		len = MIN(resid, blksz);
		crypto_cursor_copydata(&cc_in, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid > 0; resid -= len) {
		len = MIN(resid, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_cursor_copydata(&cc_in, len, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt(swe->sw_kschedule, blk, blk);
			axf->Update(&ctx, blk, len);
			crypto_cursor_copyback(&cc_out, len, blk);
		} else {
			/* Decrypt pass 1: only authenticate the ciphertext. */
			axf->Update(&ctx, blk, len);
		}
	}

	/* length block */
	bzero(blk, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_aad_length * 8);
	blkp = (uint32_t *)blk + 3;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		u_char uaalg[AALG_MAX_RESULT_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);

		r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
		explicit_bzero(uaalg, sizeof(uaalg));
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > 0;
		     resid -= len) {
			len = MIN(resid, blksz);
			if (len < blksz)
				bzero(blk, blksz);
			crypto_cursor_copydata(&cc_in, len, blk);
			exf->decrypt(swe->sw_kschedule, blk, blk);
			crypto_cursor_copyback(&cc_out, len, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    aalg);
	}

out:
	/* Scrub key/tag material from the stack. */
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(aalg, sizeof(aalg));
	explicit_bzero(iv, sizeof(iv));

	return (error);
}
589 
590 static int
591 swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
592 {
593 	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
594 	u_char *blk = (u_char *)blkbuf;
595 	u_char aalg[AALG_MAX_RESULT_LEN];
596 	u_char iv[EALG_MAX_BLOCK_LEN];
597 	struct crypto_buffer_cursor cc;
598 	union authctx ctx;
599 	struct swcr_auth *swa;
600 	struct auth_hash *axf;
601 	int blksz, error, ivlen, len, resid;
602 
603 	swa = &ses->swcr_auth;
604 	axf = swa->sw_axf;
605 
606 	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
607 	blksz = axf->blocksize;
608 
609 	/* Initialize the IV */
610 	ivlen = AES_CCM_IV_LEN;
611 	crypto_read_iv(crp, iv);
612 
613 	/*
614 	 * AES CCM-CBC-MAC needs to know the length of both the auth
615 	 * data and payload data before doing the auth computation.
616 	 */
617 	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length;
618 	ctx.aes_cbc_mac_ctx.cryptDataLength = 0;
619 
620 	axf->Reinit(&ctx, iv, ivlen);
621 	crypto_cursor_init(&cc, &crp->crp_buf);
622 	crypto_cursor_advance(&cc, crp->crp_aad_start);
623 	for (resid = crp->crp_payload_length; resid > 0; resid -= len) {
624 		len = MIN(resid, blksz);
625 		crypto_cursor_copydata(&cc, len, blk);
626 		bzero(blk + len, blksz - len);
627 		axf->Update(&ctx, blk, blksz);
628 	}
629 
630 	/* Finalize MAC */
631 	axf->Final(aalg, &ctx);
632 
633 	error = 0;
634 	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
635 		u_char uaalg[AALG_MAX_RESULT_LEN];
636 
637 		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
638 		    uaalg);
639 		if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
640 			error = EBADMSG;
641 		explicit_bzero(uaalg, sizeof(uaalg));
642 	} else {
643 		/* Inject the authentication data */
644 		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
645 	}
646 	explicit_bzero(blkbuf, sizeof(blkbuf));
647 	explicit_bzero(aalg, sizeof(aalg));
648 	explicit_bzero(iv, sizeof(iv));
649 	return (error);
650 }
651 
/*
 * AES-CCM AEAD.  CCM authenticates the *plaintext*, so on decrypt the
 * payload is decrypted once to compute the tag and, after the tag
 * verifies, decrypted again (with a fresh counter) to produce output.
 */
static int
swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	int blksz, error, ivlen, len, r, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	KASSERT(axf->blocksize == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	/* CCM requires a separately-supplied per-request IV (nonce). */
	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_aad_start);
	for (resid = crp->crp_aad_length; resid > 0; resid -= len) {
		len = MIN(resid, blksz);
		crypto_cursor_copydata(&cc_in, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption/decryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid > 0; resid -= len) {
		len = MIN(resid, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_cursor_copydata(&cc_in, len, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			/* MAC the plaintext, then encrypt it. */
			axf->Update(&ctx, blk, len);
			exf->encrypt(swe->sw_kschedule, blk, blk);
			crypto_cursor_copyback(&cc_out, len, blk);
		} else {
			/*
			 * One of the problems with CCM+CBC is that
			 * the authentication is done on the
			 * unecncrypted data.  As a result, we have to
			 * decrypt the data twice: once to generate
			 * the tag and a second time after the tag is
			 * verified.
			 */
			exf->decrypt(swe->sw_kschedule, blk, blk);
			axf->Update(&ctx, blk, len);
		}
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		u_char uaalg[AALG_MAX_RESULT_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);

		/* Constant-time compare against the supplied tag. */
		r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
		explicit_bzero(uaalg, sizeof(uaalg));
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		exf->reinit(swe->sw_kschedule, iv);
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > 0;
		     resid -= len) {
			len = MIN(resid, blksz);
			if (len < blksz)
				bzero(blk, blksz);
			crypto_cursor_copydata(&cc_in, len, blk);
			exf->decrypt(swe->sw_kschedule, blk, blk);
			crypto_cursor_copyback(&cc_out, len, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    aalg);
	}

out:
	/* Scrub key/tag material from the stack. */
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(aalg, sizeof(aalg));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}
781 
782 /*
783  * Apply a cipher and a digest to perform EtA.
784  */
785 static int
786 swcr_eta(struct swcr_session *ses, struct cryptop *crp)
787 {
788 	int error;
789 
790 	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
791 		error = swcr_encdec(ses, crp);
792 		if (error == 0)
793 			error = swcr_authcompute(ses, crp);
794 	} else {
795 		error = swcr_authcompute(ses, crp);
796 		if (error == 0)
797 			error = swcr_encdec(ses, crp);
798 	}
799 	return (error);
800 }
801 
802 /*
803  * Apply a compression/decompression algorithm
804  */
/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct swcr_session *ses, struct cryptop *crp)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = ses->swcr_compdec.sw_cxf;

	/* We must handle the whole buffer of data in one time
	 * then if there is not all the data in the mbuf, we must
	 * copy in a buffer.
	 */

	data = malloc(crp->crp_payload_length, M_CRYPTO_DATA,  M_NOWAIT);
	if (data == NULL)
		return (EINVAL);	/* NOTE(review): EINVAL for an allocation failure; ENOMEM would be more conventional */
	crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
	    data);

	/* The transform allocates 'out' on success; we own it below. */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
		result = cxf->compress(data, crp->crp_payload_length, &out);
	else
		result = cxf->decompress(data, crp->crp_payload_length, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return (EINVAL);
	crp->crp_olen = result;

	/* Check the compressed size when doing compression */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
		if (result >= crp->crp_payload_length) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return (0);
		}
	}

	/* Copy back the (de)compressed data. m_copyback is
	 * extending the mbuf as necessary.
	 */
	crypto_copyback(crp, crp->crp_payload_start, result, out);
	if (result < crp->crp_payload_length) {
		/* Output shrank: trim the buffer down to the new length. */
		switch (crp->crp_buf.cb_type) {
		case CRYPTO_BUF_MBUF:
			adj = result - crp->crp_payload_length;
			m_adj(crp->crp_buf.cb_mbuf, adj);
			break;
		case CRYPTO_BUF_UIO: {
			struct uio *uio = crp->crp_buf.cb_uio;
			int ind;

			/* Shorten iovecs from the tail until 'adj' is consumed. */
			adj = crp->crp_payload_length - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
			}
			break;
		default:
			break;
		}
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}
882 
883 static int
884 swcr_setup_cipher(struct swcr_session *ses,
885     const struct crypto_session_params *csp)
886 {
887 	struct swcr_encdec *swe;
888 	struct enc_xform *txf;
889 	int error;
890 
891 	swe = &ses->swcr_encdec;
892 	txf = crypto_cipher(csp);
893 	MPASS(txf->ivsize == csp->csp_ivlen);
894 	if (txf->ctxsize != 0) {
895 		swe->sw_kschedule = malloc(txf->ctxsize, M_CRYPTO_DATA,
896 		    M_NOWAIT);
897 		if (swe->sw_kschedule == NULL)
898 			return (ENOMEM);
899 	}
900 	if (csp->csp_cipher_key != NULL) {
901 		error = txf->setkey(swe->sw_kschedule,
902 		    csp->csp_cipher_key, csp->csp_cipher_klen);
903 		if (error)
904 			return (error);
905 	}
906 	swe->sw_exf = txf;
907 	return (0);
908 }
909 
/*
 * Set up the auth side of a session: record the transform, validate
 * and record the digest length, allocate context(s), and key/init them
 * per algorithm.  For digest-only sessions, also select the process
 * handler.  Returns 0 or an errno.
 */
static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	swa = &ses->swcr_auth;

	axf = crypto_auth_hash(csp);
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	/* mlen == 0 means "use the full digest". */
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		/* HMACs also need the outer (opad) context. */
		swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_octx == NULL)
			return (ENOBUFS);

		/* Key may also arrive per-request; only schedule if given. */
		if (csp->csp_auth_key != NULL) {
			swcr_authprepare(axf, swa, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}

		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_NIST_GMAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_gmac;
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		/*
		 * Blake2b and Blake2s support an optional key but do
		 * not require one.
		 */
		if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL)
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_ccm_cbc_mac;
		break;
	}

	return (0);
}
993 
994 static int
995 swcr_setup_gcm(struct swcr_session *ses,
996     const struct crypto_session_params *csp)
997 {
998 	struct swcr_auth *swa;
999 	struct auth_hash *axf;
1000 
1001 	if (csp->csp_ivlen != AES_GCM_IV_LEN)
1002 		return (EINVAL);
1003 
1004 	/* First, setup the auth side. */
1005 	swa = &ses->swcr_auth;
1006 	switch (csp->csp_cipher_klen * 8) {
1007 	case 128:
1008 		axf = &auth_hash_nist_gmac_aes_128;
1009 		break;
1010 	case 192:
1011 		axf = &auth_hash_nist_gmac_aes_192;
1012 		break;
1013 	case 256:
1014 		axf = &auth_hash_nist_gmac_aes_256;
1015 		break;
1016 	default:
1017 		return (EINVAL);
1018 	}
1019 	swa->sw_axf = axf;
1020 	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
1021 		return (EINVAL);
1022 	if (csp->csp_auth_mlen == 0)
1023 		swa->sw_mlen = axf->hashsize;
1024 	else
1025 		swa->sw_mlen = csp->csp_auth_mlen;
1026 	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
1027 	if (swa->sw_ictx == NULL)
1028 		return (ENOBUFS);
1029 	axf->Init(swa->sw_ictx);
1030 	if (csp->csp_cipher_key != NULL)
1031 		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
1032 		    csp->csp_cipher_klen);
1033 
1034 	/* Second, setup the cipher side. */
1035 	return (swcr_setup_cipher(ses, csp));
1036 }
1037 
1038 static int
1039 swcr_setup_ccm(struct swcr_session *ses,
1040     const struct crypto_session_params *csp)
1041 {
1042 	struct swcr_auth *swa;
1043 	struct auth_hash *axf;
1044 
1045 	if (csp->csp_ivlen != AES_CCM_IV_LEN)
1046 		return (EINVAL);
1047 
1048 	/* First, setup the auth side. */
1049 	swa = &ses->swcr_auth;
1050 	switch (csp->csp_cipher_klen * 8) {
1051 	case 128:
1052 		axf = &auth_hash_ccm_cbc_mac_128;
1053 		break;
1054 	case 192:
1055 		axf = &auth_hash_ccm_cbc_mac_192;
1056 		break;
1057 	case 256:
1058 		axf = &auth_hash_ccm_cbc_mac_256;
1059 		break;
1060 	default:
1061 		return (EINVAL);
1062 	}
1063 	swa->sw_axf = axf;
1064 	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
1065 		return (EINVAL);
1066 	if (csp->csp_auth_mlen == 0)
1067 		swa->sw_mlen = axf->hashsize;
1068 	else
1069 		swa->sw_mlen = csp->csp_auth_mlen;
1070 	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
1071 	if (swa->sw_ictx == NULL)
1072 		return (ENOBUFS);
1073 	axf->Init(swa->sw_ictx);
1074 	if (csp->csp_cipher_key != NULL)
1075 		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
1076 		    csp->csp_cipher_klen);
1077 
1078 	/* Second, setup the cipher side. */
1079 	return (swcr_setup_cipher(ses, csp));
1080 }
1081 
1082 static bool
1083 swcr_auth_supported(const struct crypto_session_params *csp)
1084 {
1085 	struct auth_hash *axf;
1086 
1087 	axf = crypto_auth_hash(csp);
1088 	if (axf == NULL)
1089 		return (false);
1090 	switch (csp->csp_auth_alg) {
1091 	case CRYPTO_SHA1_HMAC:
1092 	case CRYPTO_SHA2_224_HMAC:
1093 	case CRYPTO_SHA2_256_HMAC:
1094 	case CRYPTO_SHA2_384_HMAC:
1095 	case CRYPTO_SHA2_512_HMAC:
1096 	case CRYPTO_NULL_HMAC:
1097 	case CRYPTO_RIPEMD160_HMAC:
1098 		break;
1099 	case CRYPTO_AES_NIST_GMAC:
1100 		switch (csp->csp_auth_klen * 8) {
1101 		case 128:
1102 		case 192:
1103 		case 256:
1104 			break;
1105 		default:
1106 			return (false);
1107 		}
1108 		if (csp->csp_auth_key == NULL)
1109 			return (false);
1110 		if (csp->csp_ivlen != AES_GCM_IV_LEN)
1111 			return (false);
1112 		break;
1113 	case CRYPTO_POLY1305:
1114 		if (csp->csp_auth_klen != POLY1305_KEY_LEN)
1115 			return (false);
1116 		break;
1117 	case CRYPTO_AES_CCM_CBC_MAC:
1118 		switch (csp->csp_auth_klen * 8) {
1119 		case 128:
1120 		case 192:
1121 		case 256:
1122 			break;
1123 		default:
1124 			return (false);
1125 		}
1126 		if (csp->csp_auth_key == NULL)
1127 			return (false);
1128 		if (csp->csp_ivlen != AES_CCM_IV_LEN)
1129 			return (false);
1130 		break;
1131 	}
1132 	return (true);
1133 }
1134 
1135 static bool
1136 swcr_cipher_supported(const struct crypto_session_params *csp)
1137 {
1138 	struct enc_xform *txf;
1139 
1140 	txf = crypto_cipher(csp);
1141 	if (txf == NULL)
1142 		return (false);
1143 	if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
1144 	    txf->ivsize != csp->csp_ivlen)
1145 		return (false);
1146 	return (true);
1147 }
1148 
1149 static int
1150 swcr_probesession(device_t dev, const struct crypto_session_params *csp)
1151 {
1152 
1153 	if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT)) != 0)
1154 		return (EINVAL);
1155 	switch (csp->csp_mode) {
1156 	case CSP_MODE_COMPRESS:
1157 		switch (csp->csp_cipher_alg) {
1158 		case CRYPTO_DEFLATE_COMP:
1159 			break;
1160 		default:
1161 			return (EINVAL);
1162 		}
1163 		break;
1164 	case CSP_MODE_CIPHER:
1165 		switch (csp->csp_cipher_alg) {
1166 		case CRYPTO_AES_NIST_GCM_16:
1167 		case CRYPTO_AES_CCM_16:
1168 			return (EINVAL);
1169 		default:
1170 			if (!swcr_cipher_supported(csp))
1171 				return (EINVAL);
1172 			break;
1173 		}
1174 		break;
1175 	case CSP_MODE_DIGEST:
1176 		if (!swcr_auth_supported(csp))
1177 			return (EINVAL);
1178 		break;
1179 	case CSP_MODE_AEAD:
1180 		switch (csp->csp_cipher_alg) {
1181 		case CRYPTO_AES_NIST_GCM_16:
1182 		case CRYPTO_AES_CCM_16:
1183 			break;
1184 		default:
1185 			return (EINVAL);
1186 		}
1187 		break;
1188 	case CSP_MODE_ETA:
1189 		/* AEAD algorithms cannot be used for EtA. */
1190 		switch (csp->csp_cipher_alg) {
1191 		case CRYPTO_AES_NIST_GCM_16:
1192 		case CRYPTO_AES_CCM_16:
1193 			return (EINVAL);
1194 		}
1195 		switch (csp->csp_auth_alg) {
1196 		case CRYPTO_AES_NIST_GMAC:
1197 		case CRYPTO_AES_CCM_CBC_MAC:
1198 			return (EINVAL);
1199 		}
1200 
1201 		if (!swcr_cipher_supported(csp) ||
1202 		    !swcr_auth_supported(csp))
1203 			return (EINVAL);
1204 		break;
1205 	default:
1206 		return (EINVAL);
1207 	}
1208 
1209 	return (CRYPTODEV_PROBE_SOFTWARE);
1210 }
1211 
1212 /*
1213  * Generate a new software session.
1214  */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct swcr_session *ses;
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct comp_algo *cxf;
	int error;

	ses = crypto_get_driver_session(cses);
	/* Per-session lock; swcr_process() serializes requests with it. */
	mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);

	error = 0;
	swe = &ses->swcr_encdec;
	swa = &ses->swcr_auth;
	/*
	 * Parameters were already vetted by swcr_probesession(), so the
	 * unsupported combinations below are programming errors: under
	 * INVARIANTS they panic rather than return an error.
	 */
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			break;
#ifdef INVARIANTS
		default:
			panic("bad compression algo");
#endif
		}
		ses->swcr_compdec.sw_cxf = cxf;
		ses->swcr_process = swcr_compdec;
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_NULL_CBC:
			/* No key schedule needed for the NULL cipher. */
			ses->swcr_process = swcr_null;
			break;
#ifdef INVARIANTS
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad cipher algo");
#endif
		default:
			error = swcr_setup_cipher(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_encdec;
		}
		break;
	case CSP_MODE_DIGEST:
		error = swcr_setup_auth(ses, csp);
		break;
	case CSP_MODE_AEAD:
		/* Each AEAD setup configures both MAC and cipher state. */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			error = swcr_setup_gcm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_gcm;
			break;
		case CRYPTO_AES_CCM_16:
			error = swcr_setup_ccm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_ccm;
			break;
#ifdef INVARIANTS
		default:
			panic("bad aead algo");
#endif
		}
		break;
	case CSP_MODE_ETA:
#ifdef INVARIANTS
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad eta cipher algo");
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			panic("bad eta auth algo");
		}
#endif

		error = swcr_setup_auth(ses, csp);
		if (error)
			break;
		if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
			/* Effectively degrade to digest mode. */
			ses->swcr_process = swcr_authcompute;
			break;
		}

		error = swcr_setup_cipher(ses, csp);
		if (error == 0)
			ses->swcr_process = swcr_eta;
		break;
	default:
		error = EINVAL;
	}

	/* On failure, tear down any partially-initialized state. */
	if (error)
		swcr_freesession(dev, cses);
	return (error);
}
1317 
1318 static void
1319 swcr_freesession(device_t dev, crypto_session_t cses)
1320 {
1321 	struct swcr_session *ses;
1322 	struct swcr_auth *swa;
1323 	struct auth_hash *axf;
1324 
1325 	ses = crypto_get_driver_session(cses);
1326 
1327 	mtx_destroy(&ses->swcr_lock);
1328 
1329 	zfree(ses->swcr_encdec.sw_kschedule, M_CRYPTO_DATA);
1330 
1331 	axf = ses->swcr_auth.sw_axf;
1332 	if (axf != NULL) {
1333 		swa = &ses->swcr_auth;
1334 		if (swa->sw_ictx != NULL) {
1335 			explicit_bzero(swa->sw_ictx, axf->ctxsize);
1336 			free(swa->sw_ictx, M_CRYPTO_DATA);
1337 		}
1338 		if (swa->sw_octx != NULL) {
1339 			explicit_bzero(swa->sw_octx, axf->ctxsize);
1340 			free(swa->sw_octx, M_CRYPTO_DATA);
1341 		}
1342 	}
1343 }
1344 
1345 /*
1346  * Process a software request.
1347  */
1348 static int
1349 swcr_process(device_t dev, struct cryptop *crp, int hint)
1350 {
1351 	struct swcr_session *ses;
1352 
1353 	ses = crypto_get_driver_session(crp->crp_session);
1354 	mtx_lock(&ses->swcr_lock);
1355 
1356 	crp->crp_etype = ses->swcr_process(ses, crp);
1357 
1358 	mtx_unlock(&ses->swcr_lock);
1359 	crypto_done(crp);
1360 	return (0);
1361 }
1362 
1363 static void
1364 swcr_identify(driver_t *drv, device_t parent)
1365 {
1366 	/* NB: order 10 is so we get attached after h/w devices */
1367 	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
1368 	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
1369 		panic("cryptosoft: could not attach");
1370 }
1371 
/*
 * Probe always succeeds for the device our identify routine added, but
 * at NOWILDCARD priority so we never claim anyone else's device.
 */
static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (BUS_PROBE_NOWILDCARD);
}
1378 
1379 static int
1380 swcr_attach(device_t dev)
1381 {
1382 
1383 	swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
1384 			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
1385 	if (swcr_id < 0) {
1386 		device_printf(dev, "cannot initialize!");
1387 		return (ENXIO);
1388 	}
1389 
1390 	return (0);
1391 }
1392 
1393 static int
1394 swcr_detach(device_t dev)
1395 {
1396 	crypto_unregister_all(swcr_id);
1397 	return 0;
1398 }
1399 
1400 static device_method_t swcr_methods[] = {
1401 	DEVMETHOD(device_identify,	swcr_identify),
1402 	DEVMETHOD(device_probe,		swcr_probe),
1403 	DEVMETHOD(device_attach,	swcr_attach),
1404 	DEVMETHOD(device_detach,	swcr_detach),
1405 
1406 	DEVMETHOD(cryptodev_probesession, swcr_probesession),
1407 	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
1408 	DEVMETHOD(cryptodev_freesession,swcr_freesession),
1409 	DEVMETHOD(cryptodev_process,	swcr_process),
1410 
1411 	{0, 0},
1412 };
1413 
/* newbus driver glue for the software crypto pseudo-device. */
static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;
1420 
1421 /*
1422  * NB: We explicitly reference the crypto module so we
1423  * get the necessary ordering when built as a loadable
1424  * module.  This is required because we bundle the crypto
1425  * module code together with the cryptosoft driver (otherwise
1426  * normal module dependencies would handle things).
1427  */
/* Defined in the crypto core; used as this module's event handler. */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
1433