/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>
#include <sys/mutex.h>

#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

struct swcr_auth {
	void		*sw_ictx;
	void		*sw_octx;
	struct auth_hash *sw_axf;
	uint16_t	sw_mlen;
};

struct swcr_encdec {
	void		*sw_kschedule;
	struct enc_xform *sw_exf;
};

struct swcr_compdec {
	struct comp_algo *sw_cxf;
};

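/*
 * Per-session state for the software driver.  Each session holds the
 * processing callback chosen at setup time plus whatever auth, cipher,
 * and compression state that callback needs.
 */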
struct swcr_session {
	struct mtx	swcr_lock;
	int	(*swcr_process)(struct swcr_session *, struct cryptop *);

	struct swcr_auth swcr_auth;
	struct swcr_encdec swcr_encdec;
	struct swcr_compdec swcr_compdec;
};

static	int32_t swcr_id;

static	void swcr_freesession(device_t dev, crypto_session_t cses);

/* Used for CRYPTO_NULL_CBC. */
static int
swcr_null(struct swcr_session *ses, struct cryptop *crp)
{

	return (0);
}

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	const struct crypto_session_params *csp;
	struct swcr_encdec *sw;
	struct enc_xform *exf;
	int i, blks, inlen, ivlen, outlen, resid;
	struct crypto_buffer_cursor cc_in, cc_out;
	const char *inblk;
	char *outblk;
	int error;
	bool encrypting;

	error = 0;

	sw = &ses->swcr_encdec;
	exf = sw->sw_exf;
	ivlen = exf->ivsize;

	if (exf->native_blocksize == 0) {
		/* Check for non-padded data */
		if ((crp->crp_payload_length % exf->blocksize) != 0)
			return (EINVAL);

		blks = exf->blocksize;
	} else
		blks = exf->native_blocksize;

	if (exf == &enc_xform_aes_icm &&
	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	crypto_read_iv(crp, iv);

	if (crp->crp_cipher_key != NULL) {
		csp = crypto_get_params(crp->crp_session);
		error = exf->setkey(sw->sw_kschedule,
		    crp->crp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(sw->sw_kschedule, iv);
	}

	ivp = iv;

	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	inlen = crypto_cursor_seglen(&cc_in);
	inblk = crypto_cursor_segbase(&cc_in);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	outlen = crypto_cursor_seglen(&cc_out);
	outblk = crypto_cursor_segbase(&cc_out);

	resid = crp->crp_payload_length;
	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);

	/*
	 * Loop through encrypting blocks.  'inlen' is the remaining
	 * length of the current segment in the input buffer.
	 * 'outlen' is the remaining length of current segment in the
	 * output buffer.
	 */
	while (resid >= blks) {
		/*
		 * If the current block is not contained within the
		 * current input/output segment, use 'blk' as a local
		 * buffer.
		 */
		if (inlen < blks) {
			crypto_cursor_copydata(&cc_in, blks, blk);
			inblk = blk;
		}
		if (outlen < blks)
			outblk = blk;

		/*
		 * Ciphers without a 'reinit' hook are assumed to be
		 * used in CBC mode where the chaining is done here.
		 */
		if (exf->reinit != NULL) {
			if (encrypting)
				exf->encrypt(sw->sw_kschedule, inblk, outblk);
			else
				exf->decrypt(sw->sw_kschedule, inblk, outblk);
		} else if (encrypting) {
			/* XOR with previous block */
			for (i = 0; i < blks; i++)
				outblk[i] = inblk[i] ^ ivp[i];

			exf->encrypt(sw->sw_kschedule, outblk, outblk);

			/*
			 * Keep encrypted block for XOR'ing
			 * with next block
			 */
			memcpy(iv, outblk, blks);
			ivp = iv;
		} else {	/* decrypt */
			/*
			 * Keep encrypted block for XOR'ing
			 * with next block
			 */
			nivp = (ivp == iv) ? iv2 : iv;
			memcpy(nivp, inblk, blks);

			exf->decrypt(sw->sw_kschedule, inblk, outblk);

			/* XOR with previous block */
			for (i = 0; i < blks; i++)
				outblk[i] ^= ivp[i];

			ivp = nivp;
		}

		if (inlen < blks) {
			inlen = crypto_cursor_seglen(&cc_in);
			inblk = crypto_cursor_segbase(&cc_in);
		} else {
			crypto_cursor_advance(&cc_in, blks);
			inlen -= blks;
			inblk += blks;
		}

		if (outlen < blks) {
			crypto_cursor_copyback(&cc_out, blks, blk);
			outlen = crypto_cursor_seglen(&cc_out);
			outblk = crypto_cursor_segbase(&cc_out);
		} else {
			crypto_cursor_advance(&cc_out, blks);
			outlen -= blks;
			outblk += blks;
		}

		resid -= blks;
	}

	/* Handle trailing partial block for stream ciphers. */
	if (resid > 0) {
		KASSERT(exf->native_blocksize != 0,
		    ("%s: partial block of %d bytes for cipher %s",
		    __func__, resid, exf->name));
		KASSERT(exf->reinit != NULL,
		    ("%s: partial block cipher %s without reinit hook",
		    __func__, exf->name));
		KASSERT(resid < blks, ("%s: partial block too big", __func__));

		inlen = crypto_cursor_seglen(&cc_in);
		outlen = crypto_cursor_seglen(&cc_out);
		if (inlen < resid) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			inblk = blk;
		} else
			inblk = crypto_cursor_segbase(&cc_in);
		if (outlen < resid)
			outblk = blk;
		else
			outblk = crypto_cursor_segbase(&cc_out);
		if (encrypting)
			exf->encrypt_last(sw->sw_kschedule, inblk, outblk,
			    resid);
		else
			exf->decrypt_last(sw->sw_kschedule, inblk, outblk,
			    resid);
		if (outlen < resid)
			crypto_cursor_copyback(&cc_out, resid, blk);
	}

	return (0);
}

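/*
 * (Re)initialize the session's authentication state from the given key:
 * precomputed ipad/opad contexts for the HMAC algorithms, a plain
 * Setkey/Init for the keyed-digest algorithms.
 */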
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw,
    const uint8_t *key, int klen)
{

	switch (axf->type) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		hmac_init_ipad(axf, key, klen, sw->sw_ictx);
		hmac_init_opad(axf, key, klen, sw->sw_octx);
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		axf->Setkey(sw->sw_ictx, key, klen);
		axf->Init(sw->sw_ictx);
		break;
	default:
		panic("%s: algorithm %d doesn't use keys", __func__, axf->type);
	}
}

/*
 * Compute or verify hash.
 */
static int
swcr_authcompute(struct swcr_session *ses, struct cryptop *crp)
{
	u_char aalg[HASH_MAX_LEN];
	u_char uaalg[HASH_MAX_LEN];
	const struct crypto_session_params *csp;
	struct swcr_auth *sw;
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	sw = &ses->swcr_auth;

	axf = sw->sw_axf;

	if (crp->crp_auth_key != NULL) {
		csp = crypto_get_params(crp->crp_session);
		swcr_authprepare(axf, sw, crp->crp_auth_key,
		    csp->csp_auth_klen);
	}

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
	    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
	if (err)
		return (err);

	if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
	    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		err = crypto_apply_buf(&crp->crp_obuf,
		    crp->crp_payload_output_start, crp->crp_payload_length,
		    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
	else
		err = crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length,
		    (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
	if (err)
		return (err);

	switch (axf->type) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return (EINVAL);

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_POLY1305:
		axf->Final(aalg, &ctx);
		break;
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
		if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
	}
	return (0);
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */

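/* Compute or verify an AES-GMAC tag over the request payload. */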
static int
swcr_gmac(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	struct crypto_buffer_cursor cc;
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	uint32_t *blkp;
	int blksz, ivlen, len, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	crypto_read_iv(crp, iv);

	axf->Reinit(&ctx, iv, ivlen);
	crypto_cursor_init(&cc, &crp->crp_buf);
	crypto_cursor_advance(&cc, crp->crp_payload_start);
	for (resid = crp->crp_payload_length; resid > 0; resid -= len) {
		len = MIN(resid, blksz);
		crypto_cursor_copydata(&cc, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	/* length block */
	bzero(blk, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);
		if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
	}
	return (0);
}

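/*
 * Perform AES-GCM: authenticate the AAD, then encrypt the payload while
 * updating the MAC, or verify the tag first and decrypt on a second pass.
 */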
static int
swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	uint32_t *blkp;
	int blksz, ivlen, len, r, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	KASSERT(axf->blocksize == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_aad_start);
	for (resid = crp->crp_aad_length; resid > 0; resid -= len) {
		len = MIN(resid, blksz);
		crypto_cursor_copydata(&cc_in, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid > 0; resid -= len) {
		len = MIN(resid, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_cursor_copydata(&cc_in, len, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt(swe->sw_kschedule, blk, blk);
			axf->Update(&ctx, blk, len);
			crypto_cursor_copyback(&cc_out, len, blk);
		} else {
			axf->Update(&ctx, blk, len);
		}
	}

	/* length block */
	bzero(blk, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_aad_length * 8);
	blkp = (uint32_t *)blk + 3;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);

		r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
		if (r != 0)
			return (EBADMSG);

		/* tag matches, decrypt data */
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > 0;
		     resid -= len) {
			len = MIN(resid, blksz);
			if (len < blksz)
				bzero(blk, blksz);
			crypto_cursor_copydata(&cc_in, len, blk);
			exf->decrypt(swe->sw_kschedule, blk, blk);
			crypto_cursor_copyback(&cc_out, len, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    aalg);
	}

	return (0);
}

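/* Compute or verify an AES-CCM CBC-MAC tag over the request payload. */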
static int
swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	struct crypto_buffer_cursor cc;
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	int blksz, ivlen, len, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	crypto_read_iv(crp, iv);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = 0;

	axf->Reinit(&ctx, iv, ivlen);
	crypto_cursor_init(&cc, &crp->crp_buf);
	crypto_cursor_advance(&cc, crp->crp_payload_start);
	for (resid = crp->crp_payload_length; resid > 0; resid -= len) {
		len = MIN(resid, blksz);
		crypto_cursor_copydata(&cc, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);
		if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
	}
	return (0);
}

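/*
 * Perform AES-CCM: authenticate the AAD and the plaintext, encrypting
 * on the way through or decrypting again once the tag has been verified.
 */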
static int
swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	int blksz, ivlen, len, r, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = axf->blocksize;

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	KASSERT(axf->blocksize == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_aad_start);
	for (resid = crp->crp_aad_length; resid > 0; resid -= len) {
		len = MIN(resid, blksz);
		crypto_cursor_copydata(&cc_in, len, blk);
		bzero(blk + len, blksz - len);
		axf->Update(&ctx, blk, blksz);
	}

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption/decryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid > 0; resid -= len) {
		len = MIN(resid, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_cursor_copydata(&cc_in, len, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			axf->Update(&ctx, blk, len);
			exf->encrypt(swe->sw_kschedule, blk, blk);
			crypto_cursor_copyback(&cc_out, len, blk);
		} else {
			/*
			 * One of the problems with CCM+CBC is that
			 * the authentication is done on the
			 * unencrypted data.  As a result, we have to
			 * decrypt the data twice: once to generate
			 * the tag and a second time after the tag is
			 * verified.
			 */
			exf->decrypt(swe->sw_kschedule, blk, blk);
			axf->Update(&ctx, blk, len);
		}
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    uaalg);

		r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
		if (r != 0)
			return (EBADMSG);

		/* tag matches, decrypt data */
		exf->reinit(swe->sw_kschedule, iv);
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > 0;
		     resid -= len) {
			len = MIN(resid, blksz);
			if (len < blksz)
				bzero(blk, blksz);
			crypto_cursor_copydata(&cc_in, len, blk);
			exf->decrypt(swe->sw_kschedule, blk, blk);
			crypto_cursor_copyback(&cc_out, len, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    aalg);
	}

	return (0);
}

/*
 * Apply a cipher and a digest to perform EtA.
 */
static int
swcr_eta(struct swcr_session *ses, struct cryptop *crp)
{
	int error;

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		error = swcr_encdec(ses, crp);
		if (error == 0)
			error = swcr_authcompute(ses, crp);
	} else {
		error = swcr_authcompute(ses, crp);
		if (error == 0)
			error = swcr_encdec(ses, crp);
	}
	return (error);
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct swcr_session *ses, struct cryptop *crp)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = ses->swcr_compdec.sw_cxf;

	/*
	 * The (de)compression transforms operate on a single contiguous
	 * buffer, so if the payload spans multiple mbufs or iovecs it
	 * must first be copied into a local buffer.
	 */

	data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
	    data);

	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
		result = cxf->compress(data, crp->crp_payload_length, &out);
	else
		result = cxf->decompress(data, crp->crp_payload_length, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return (EINVAL);
	crp->crp_olen = result;

	/* Check the compressed size when doing compression */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
		if (result >= crp->crp_payload_length) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return (0);
		}
	}

	/*
	 * Copy back the (de)compressed data.  m_copyback will extend
	 * the mbuf as necessary.
	 */
	crypto_copyback(crp, crp->crp_payload_start, result, out);
	if (result < crp->crp_payload_length) {
		switch (crp->crp_buf.cb_type) {
		case CRYPTO_BUF_MBUF:
			adj = result - crp->crp_payload_length;
			m_adj(crp->crp_buf.cb_mbuf, adj);
			break;
		case CRYPTO_BUF_UIO: {
			struct uio *uio = crp->crp_buf.cb_uio;
			int ind;

			adj = crp->crp_payload_length - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
			}
			break;
		default:
			break;
		}
	}
	free(out, M_CRYPTO_DATA);
	return (0);
}

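/*
 * Allocate the cipher's key schedule and program the initial key, if
 * one was supplied with the session parameters.
 */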
static int
swcr_setup_cipher(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	struct enc_xform *txf;
	int error;

	swe = &ses->swcr_encdec;
	txf = crypto_cipher(csp);
	MPASS(txf->ivsize == csp->csp_ivlen);
	if (txf->ctxsize != 0) {
		swe->sw_kschedule = malloc(txf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swe->sw_kschedule == NULL)
			return (ENOMEM);
	}
	if (csp->csp_cipher_key != NULL) {
		error = txf->setkey(swe->sw_kschedule,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;
	return (0);
}

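/*
 * Set up the authentication state for a session and, for digest-only
 * sessions, select the matching processing callback.
 */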
static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	swa = &ses->swcr_auth;

	axf = crypto_auth_hash(csp);
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_octx == NULL)
			return (ENOBUFS);

		if (csp->csp_auth_key != NULL) {
			swcr_authprepare(axf, swa, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}

		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_NIST_GMAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_gmac;
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		/*
		 * Blake2b and Blake2s support an optional key but do
		 * not require one.
		 */
		if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL)
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_ccm_cbc_mac;
		break;
	}

	return (0);
}

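/* Set up an AES-GCM AEAD session: the GMAC side first, then the cipher. */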
static int
swcr_setup_gcm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	if (csp->csp_ivlen != AES_GCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_nist_gmac_aes_128;
		break;
	case 192:
		axf = &auth_hash_nist_gmac_aes_192;
		break;
	case 256:
		axf = &auth_hash_nist_gmac_aes_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	return (swcr_setup_cipher(ses, csp));
}

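/* Set up an AES-CCM AEAD session: the CBC-MAC side first, then the cipher. */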
static int
swcr_setup_ccm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	if (csp->csp_ivlen != AES_CCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_ccm_cbc_mac_128;
		break;
	case 192:
		axf = &auth_hash_ccm_cbc_mac_192;
		break;
	case 256:
		axf = &auth_hash_ccm_cbc_mac_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	return (swcr_setup_cipher(ses, csp));
}

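/*
 * Check whether the software driver supports the requested
 * authentication algorithm with the given key and IV lengths.
 */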
static bool
swcr_auth_supported(const struct crypto_session_params *csp)
{
	struct auth_hash *axf;

	axf = crypto_auth_hash(csp);
	if (axf == NULL)
		return (false);
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		break;
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_GCM_IV_LEN)
			return (false);
		break;
	case CRYPTO_POLY1305:
		if (csp->csp_auth_klen != POLY1305_KEY_LEN)
			return (false);
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_CCM_IV_LEN)
			return (false);
		break;
	}
	return (true);
}

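/* Check whether the software driver supports the requested cipher. */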
static bool
swcr_cipher_supported(const struct crypto_session_params *csp)
{
	struct enc_xform *txf;

	txf = crypto_cipher(csp);
	if (txf == NULL)
		return (false);
	if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
	    txf->ivsize != csp->csp_ivlen)
		return (false);
	return (true);
}

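/*
 * Reject session parameters this driver cannot handle; accept anything
 * else at software probe priority.
 */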
static int
swcr_probesession(device_t dev, const struct crypto_session_params *csp)
{

	if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT)) != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		default:
			if (!swcr_cipher_supported(csp))
				return (EINVAL);
			break;
		}
		break;
	case CSP_MODE_DIGEST:
		if (!swcr_auth_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		/* AEAD algorithms cannot be used for EtA. */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			return (EINVAL);
		}

		if (!swcr_cipher_supported(csp) ||
		    !swcr_auth_supported(csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_SOFTWARE);
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct swcr_session *ses;
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct comp_algo *cxf;
	int error;

	ses = crypto_get_driver_session(cses);
	mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);

	error = 0;
	swe = &ses->swcr_encdec;
	swa = &ses->swcr_auth;
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			break;
#ifdef INVARIANTS
		default:
			panic("bad compression algo");
#endif
		}
		ses->swcr_compdec.sw_cxf = cxf;
		ses->swcr_process = swcr_compdec;
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_NULL_CBC:
			ses->swcr_process = swcr_null;
			break;
#ifdef INVARIANTS
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad cipher algo");
#endif
		default:
			error = swcr_setup_cipher(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_encdec;
		}
		break;
	case CSP_MODE_DIGEST:
		error = swcr_setup_auth(ses, csp);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			error = swcr_setup_gcm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_gcm;
			break;
		case CRYPTO_AES_CCM_16:
			error = swcr_setup_ccm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_ccm;
			break;
#ifdef INVARIANTS
		default:
			panic("bad aead algo");
#endif
		}
		break;
	case CSP_MODE_ETA:
#ifdef INVARIANTS
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad eta cipher algo");
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			panic("bad eta auth algo");
		}
#endif

		error = swcr_setup_auth(ses, csp);
		if (error)
			break;
		if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
			/* Effectively degrade to digest mode. */
			ses->swcr_process = swcr_authcompute;
			break;
		}

		error = swcr_setup_cipher(ses, csp);
		if (error == 0)
			ses->swcr_process = swcr_eta;
		break;
	default:
		error = EINVAL;
	}

	if (error)
		swcr_freesession(dev, cses);
	return (error);
}

static void
swcr_freesession(device_t dev, crypto_session_t cses)
{
	struct swcr_session *ses;
	struct swcr_auth *swa;
	struct auth_hash *axf;

	ses = crypto_get_driver_session(cses);

	mtx_destroy(&ses->swcr_lock);

	zfree(ses->swcr_encdec.sw_kschedule, M_CRYPTO_DATA);

	axf = ses->swcr_auth.sw_axf;
	if (axf != NULL) {
		swa = &ses->swcr_auth;
		if (swa->sw_ictx != NULL) {
			explicit_bzero(swa->sw_ictx, axf->ctxsize);
			free(swa->sw_ictx, M_CRYPTO_DATA);
		}
		if (swa->sw_octx != NULL) {
			explicit_bzero(swa->sw_octx, axf->ctxsize);
			free(swa->sw_octx, M_CRYPTO_DATA);
		}
	}
}

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(crp->crp_session);
	mtx_lock(&ses->swcr_lock);

	crp->crp_etype = ses->swcr_process(ses, crp);

	mtx_unlock(&ses->swcr_lock);
	crypto_done(crp);
	return (0);
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{

	swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return (ENXIO);
	}

	return (0);
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	return (0);
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_probesession, swcr_probesession),
	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
1396