/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>
#include <sys/mutex.h>

#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

struct swcr_auth {
	void		*sw_ictx;
	void		*sw_octx;
	struct auth_hash *sw_axf;
	uint16_t	sw_mlen;
};

struct swcr_encdec {
	void		*sw_kschedule;
	struct enc_xform *sw_exf;
};

struct swcr_compdec {
	struct comp_algo *sw_cxf;
};

struct swcr_session {
	struct mtx	swcr_lock;
	int	(*swcr_process)(struct swcr_session *, struct cryptop *);

	struct swcr_auth swcr_auth;
	struct swcr_encdec swcr_encdec;
	struct swcr_compdec swcr_compdec;
};
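
/*
 * Each session stores a pointer to the handler that implements its
 * mode (cipher, digest, AEAD, EtA, or compression).  The pointer is
 * assigned once in swcr_newsession() and invoked for every request
 * by swcr_process() with the session lock held.
 */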

static	int32_t swcr_id;

static	void swcr_freesession(device_t dev, crypto_session_t cses);

/* Used for CRYPTO_NULL_CBC. */
static int
swcr_null(struct swcr_session *ses, struct cryptop *crp)
{

	return (0);
}

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	const struct crypto_session_params *csp;
	struct swcr_encdec *sw;
	struct enc_xform *exf;
	int i, blks, inlen, ivlen, outlen, resid;
	struct crypto_buffer_cursor cc_in, cc_out;
	const unsigned char *inblk;
	unsigned char *outblk;
	int error;
	bool encrypting;

	error = 0;

	sw = &ses->swcr_encdec;
	exf = sw->sw_exf;
	ivlen = exf->ivsize;

	if (exf->native_blocksize == 0) {
		/* Check for non-padded data */
		if ((crp->crp_payload_length % exf->blocksize) != 0)
			return (EINVAL);

		blks = exf->blocksize;
	} else
		blks = exf->native_blocksize;

	if (exf == &enc_xform_aes_icm &&
	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	if (crp->crp_cipher_key != NULL) {
		csp = crypto_get_params(crp->crp_session);
		error = exf->setkey(sw->sw_kschedule,
		    crp->crp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}

	crypto_read_iv(crp, iv);

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(sw->sw_kschedule, iv);
	}

	ivp = iv;

	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	inlen = crypto_cursor_seglen(&cc_in);
	inblk = crypto_cursor_segbase(&cc_in);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	outlen = crypto_cursor_seglen(&cc_out);
	outblk = crypto_cursor_segbase(&cc_out);

	resid = crp->crp_payload_length;
	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);

	/*
	 * Loop through encrypting blocks.  'inlen' is the remaining
	 * length of the current segment in the input buffer.
	 * 'outlen' is the remaining length of current segment in the
	 * output buffer.
	 */
	while (resid >= blks) {
		/*
		 * If the current block is not contained within the
		 * current input/output segment, use 'blk' as a local
		 * buffer.
		 */
		if (inlen < blks) {
			crypto_cursor_copydata(&cc_in, blks, blk);
			inblk = blk;
		}
		if (outlen < blks)
			outblk = blk;

		/*
		 * Ciphers without a 'reinit' hook are assumed to be
		 * used in CBC mode where the chaining is done here.
		 */
		if (exf->reinit != NULL) {
			if (encrypting)
				exf->encrypt(sw->sw_kschedule, inblk, outblk);
			else
				exf->decrypt(sw->sw_kschedule, inblk, outblk);
		} else if (encrypting) {
			/* XOR with previous block */
			for (i = 0; i < blks; i++)
				outblk[i] = inblk[i] ^ ivp[i];

			exf->encrypt(sw->sw_kschedule, outblk, outblk);

			/*
			 * Keep encrypted block for XOR'ing
			 * with next block
			 */
			memcpy(iv, outblk, blks);
			ivp = iv;
		} else {	/* decrypt */
			/*
			 * Keep encrypted block for XOR'ing
			 * with next block
			 */
			nivp = (ivp == iv) ? iv2 : iv;
			memcpy(nivp, inblk, blks);

			exf->decrypt(sw->sw_kschedule, inblk, outblk);

			/* XOR with previous block */
			for (i = 0; i < blks; i++)
				outblk[i] ^= ivp[i];

			ivp = nivp;
		}

		if (inlen < blks) {
			inlen = crypto_cursor_seglen(&cc_in);
			inblk = crypto_cursor_segbase(&cc_in);
		} else {
			crypto_cursor_advance(&cc_in, blks);
			inlen -= blks;
			inblk += blks;
		}

		if (outlen < blks) {
			crypto_cursor_copyback(&cc_out, blks, blk);
			outlen = crypto_cursor_seglen(&cc_out);
			outblk = crypto_cursor_segbase(&cc_out);
		} else {
			crypto_cursor_advance(&cc_out, blks);
			outlen -= blks;
			outblk += blks;
		}

		resid -= blks;
	}

	/* Handle trailing partial block for stream ciphers. */
	if (resid > 0) {
		KASSERT(exf->native_blocksize != 0,
		    ("%s: partial block of %d bytes for cipher %s",
		    __func__, resid, exf->name));
		KASSERT(exf->reinit != NULL,
		    ("%s: partial block cipher %s without reinit hook",
		    __func__, exf->name));
		KASSERT(resid < blks, ("%s: partial block too big", __func__));

		inlen = crypto_cursor_seglen(&cc_in);
		outlen = crypto_cursor_seglen(&cc_out);
		if (inlen < resid) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			inblk = blk;
		} else
			inblk = crypto_cursor_segbase(&cc_in);
		if (outlen < resid)
			outblk = blk;
		else
			outblk = crypto_cursor_segbase(&cc_out);
		if (encrypting)
			exf->encrypt_last(sw->sw_kschedule, inblk, outblk,
			    resid);
		else
			exf->decrypt_last(sw->sw_kschedule, inblk, outblk,
			    resid);
		if (outlen < resid)
			crypto_cursor_copyback(&cc_out, resid, blk);
	}

	explicit_bzero(blk, sizeof(blk));
	explicit_bzero(iv, sizeof(iv));
	explicit_bzero(iv2, sizeof(iv2));
	return (0);
}

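/*
 * Initialize the per-session authentication state for a keyed hash.
 * HMAC algorithms precompute the inner and outer pads from the key;
 * Poly1305 and the Blake2 hashes take the key directly.
 */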
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw,
    const uint8_t *key, int klen)
{

	switch (axf->type) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		hmac_init_ipad(axf, key, klen, sw->sw_ictx);
		hmac_init_opad(axf, key, klen, sw->sw_octx);
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		axf->Setkey(sw->sw_ictx, key, klen);
		axf->Init(sw->sw_ictx);
		break;
	default:
		panic("%s: algorithm %d doesn't use keys", __func__, axf->type);
	}
}

/*
 * Compute or verify hash.
 */
static int
swcr_authcompute(struct swcr_session *ses, struct cryptop *crp)
{
	u_char aalg[HASH_MAX_LEN];
	const struct crypto_session_params *csp;
	struct swcr_auth *sw;
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	sw = &ses->swcr_auth;

	axf = sw->sw_axf;

	if (crp->crp_auth_key != NULL) {
		csp = crypto_get_params(crp->crp_session);
		swcr_authprepare(axf, sw, crp->crp_auth_key,
		    csp->csp_auth_klen);
	}

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	if (crp->crp_aad != NULL)
		err = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
	else
		err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
		    axf->Update, &ctx);
	if (err)
		goto out;

	if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
	    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		err = crypto_apply_buf(&crp->crp_obuf,
		    crp->crp_payload_output_start, crp->crp_payload_length,
		    axf->Update, &ctx);
	else
		err = crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length, axf->Update, &ctx);
	if (err)
		goto out;

	axf->Final(aalg, &ctx);
	if (sw->sw_octx != NULL) {
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char uaalg[HASH_MAX_LEN];

		crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
		if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
			err = EBADMSG;
		explicit_bzero(uaalg, sizeof(uaalg));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
	}
	explicit_bzero(aalg, sizeof(aalg));
out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (err);
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */

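/*
 * GMAC (authentication-only GCM): the payload is fed to GHASH in
 * GMAC_BLOCK_LEN chunks, followed by a length block encoding the
 * bit length of the data, as the GHASH construction requires.
 */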
static int
swcr_gmac(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[GMAC_DIGEST_LEN];
	u_char iv[AES_BLOCK_LEN];
	struct crypto_buffer_cursor cc;
	const u_char *inblk;
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	uint32_t *blkp;
	int blksz, error, ivlen, len, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = GMAC_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	crypto_read_iv(crp, iv);

	axf->Reinit(&ctx, iv, ivlen);
	crypto_cursor_init(&cc, &crp->crp_buf);
	crypto_cursor_advance(&cc, crp->crp_payload_start);
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= len) {
		len = crypto_cursor_seglen(&cc);
		if (len >= blksz) {
			inblk = crypto_cursor_segbase(&cc);
			len = rounddown(MIN(len, resid), blksz);
			crypto_cursor_advance(&cc, len);
		} else {
			len = blksz;
			crypto_cursor_copydata(&cc, len, blk);
			inblk = blk;
		}
		axf->Update(&ctx, inblk, len);
	}
	if (resid > 0) {
		memset(blk, 0, blksz);
		crypto_cursor_copydata(&cc, resid, blk);
		axf->Update(&ctx, blk, blksz);
	}

	/* length block */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	error = 0;
	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char tag2[GMAC_DIGEST_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);
		if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
		explicit_bzero(tag2, sizeof(tag2));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}

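/*
 * AES-GCM AEAD: hash the AAD, then encrypt the payload in counter
 * mode while hashing the resulting ciphertext.  On decryption the
 * tag is verified before any plaintext is written back out.
 */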
static int
swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[GMAC_DIGEST_LEN];
	u_char iv[AES_BLOCK_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	uint32_t *blkp;
	int blksz, error, ivlen, len, r, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = GMAC_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	KASSERT(axf->blocksize == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	if (crp->crp_aad != NULL) {
		len = rounddown(crp->crp_aad_length, blksz);
		if (len != 0)
			axf->Update(&ctx, crp->crp_aad, len);
		if (crp->crp_aad_length != len) {
			memset(blk, 0, blksz);
			memcpy(blk, (char *)crp->crp_aad + len,
			    crp->crp_aad_length - len);
			axf->Update(&ctx, blk, blksz);
		}
	} else {
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_aad_start);
		for (resid = crp->crp_aad_length; resid >= blksz;
		     resid -= len) {
			len = crypto_cursor_seglen(&cc_in);
			if (len >= blksz) {
				inblk = crypto_cursor_segbase(&cc_in);
				len = rounddown(MIN(len, resid), blksz);
				crypto_cursor_advance(&cc_in, len);
			} else {
				len = blksz;
				crypto_cursor_copydata(&cc_in, len, blk);
				inblk = blk;
			}
			axf->Update(&ctx, inblk, len);
		}
		if (resid > 0) {
			memset(blk, 0, blksz);
			crypto_cursor_copydata(&cc_in, resid, blk);
			axf->Update(&ctx, blk, blksz);
		}
	}

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		if (crypto_cursor_seglen(&cc_in) < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else {
			inblk = crypto_cursor_segbase(&cc_in);
			crypto_cursor_advance(&cc_in, blksz);
		}
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			exf->encrypt(swe->sw_kschedule, inblk, outblk);
			axf->Update(&ctx, outblk, blksz);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			axf->Update(&ctx, inblk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
		axf->Update(&ctx, blk, resid);
	}

	/* length block */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_aad_length * 8);
	blkp = (uint32_t *)blk + 3;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		u_char tag2[GMAC_DIGEST_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2);

		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
		explicit_bzero(tag2, sizeof(tag2));
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		     resid -= blksz) {
			if (crypto_cursor_seglen(&cc_in) < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else {
				inblk = crypto_cursor_segbase(&cc_in);
				crypto_cursor_advance(&cc_in, blksz);
			}
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			exf->decrypt(swe->sw_kschedule, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}

out:
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));

	return (error);
}

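/*
 * CCM-CBC-MAC (authentication-only CCM): the CBC-MAC encodes the
 * total data lengths in its first block, so both lengths must be
 * seeded in the context before Reinit() is called.
 */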
static int
swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
{
	u_char tag[AES_CBC_MAC_HASH_LEN];
	u_char iv[AES_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	int error, ivlen;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	crypto_read_iv(crp, iv);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = 0;

	axf->Reinit(&ctx, iv, ivlen);
	if (crp->crp_aad != NULL)
		error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
	else
		error = crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length, axf->Update, &ctx);
	if (error)
		return (error);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char tag2[AES_CBC_MAC_HASH_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);
		if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
		explicit_bzero(tag2, sizeof(tag2));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}

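/*
 * AES-CCM AEAD.  Unlike GCM, the CBC-MAC is computed over the
 * plaintext, so a decryption request must process the data twice:
 * once to compute and verify the tag and a second time to produce
 * the plaintext.
 */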
static int
swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[AES_CBC_MAC_HASH_LEN];
	u_char iv[AES_BLOCK_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	int blksz, error, ivlen, r, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = AES_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	KASSERT(axf->blocksize == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	if (crp->crp_aad != NULL)
		error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
	else
		error = crypto_apply(crp, crp->crp_aad_start,
		    crp->crp_aad_length, axf->Update, &ctx);
	if (error)
		return (error);

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption/decryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		if (crypto_cursor_seglen(&cc_in) < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else {
			inblk = crypto_cursor_segbase(&cc_in);
			crypto_cursor_advance(&cc_in, blksz);
		}
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			axf->Update(&ctx, inblk, blksz);
			exf->encrypt(swe->sw_kschedule, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			/*
			 * One of the problems with CCM+CBC is that
			 * the authentication is done on the
			 * unencrypted data.  As a result, we have to
			 * decrypt the data twice: once to generate
			 * the tag and a second time after the tag is
			 * verified.
			 */
			exf->decrypt(swe->sw_kschedule, inblk, blk);
			axf->Update(&ctx, blk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			axf->Update(&ctx, blk, resid);
			exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		} else {
			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
			axf->Update(&ctx, blk, resid);
		}
	}

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		u_char tag2[AES_CBC_MAC_HASH_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);

		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
		explicit_bzero(tag2, sizeof(tag2));
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		exf->reinit(swe->sw_kschedule, iv);
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		     resid -= blksz) {
			if (crypto_cursor_seglen(&cc_in) < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else {
				inblk = crypto_cursor_segbase(&cc_in);
				crypto_cursor_advance(&cc_in, blksz);
			}
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			exf->decrypt(swe->sw_kschedule, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}

out:
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}

/*
 * Apply a cipher and a digest to perform EtA (encrypt-then-
 * authenticate): on encryption, encrypt the payload first and then
 * compute the digest over the ciphertext; on decryption, verify the
 * digest before decrypting the payload.
 */
static int
swcr_eta(struct swcr_session *ses, struct cryptop *crp)
{
	int error;

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		error = swcr_encdec(ses, crp);
		if (error == 0)
			error = swcr_authcompute(ses, crp);
	} else {
		error = swcr_authcompute(ses, crp);
		if (error == 0)
			error = swcr_encdec(ses, crp);
	}
	return (error);
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct swcr_session *ses, struct cryptop *crp)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = ses->swcr_compdec.sw_cxf;

	/*
	 * We must process the whole buffer of data in one go, so if
	 * the data is not contiguous in the mbuf, we must copy it
	 * into a temporary buffer first.
	 */

	data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
	    data);

	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
		result = cxf->compress(data, crp->crp_payload_length, &out);
	else
		result = cxf->decompress(data, crp->crp_payload_length, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return (EINVAL);
	crp->crp_olen = result;

	/* Check the compressed size when doing compression */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
		if (result >= crp->crp_payload_length) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return (0);
		}
	}

	/*
	 * Copy back the (de)compressed data.  m_copyback extends the
	 * mbuf as necessary.
	 */
	crypto_copyback(crp, crp->crp_payload_start, result, out);
	if (result < crp->crp_payload_length) {
		switch (crp->crp_buf.cb_type) {
		case CRYPTO_BUF_MBUF:
			adj = result - crp->crp_payload_length;
			m_adj(crp->crp_buf.cb_mbuf, adj);
			break;
		case CRYPTO_BUF_UIO: {
			struct uio *uio = crp->crp_buf.cb_uio;
			int ind;

			adj = crp->crp_payload_length - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
			}
			break;
		case CRYPTO_BUF_VMPAGE:
			adj = crp->crp_payload_length - result;
			crp->crp_buf.cb_vm_page_len -= adj;
			break;
		default:
			break;
		}
	}
	free(out, M_CRYPTO_DATA);
	return (0);
}

static int
swcr_setup_cipher(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	struct enc_xform *txf;
	int error;

	swe = &ses->swcr_encdec;
	txf = crypto_cipher(csp);
	MPASS(txf->ivsize == csp->csp_ivlen);
	if (txf->ctxsize != 0) {
		swe->sw_kschedule = malloc(txf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swe->sw_kschedule == NULL)
			return (ENOMEM);
	}
	if (csp->csp_cipher_key != NULL) {
		error = txf->setkey(swe->sw_kschedule,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;
	return (0);
}

static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	swa = &ses->swcr_auth;

	axf = crypto_auth_hash(csp);
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_octx == NULL)
			return (ENOBUFS);

		if (csp->csp_auth_key != NULL) {
			swcr_authprepare(axf, swa, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}

		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_NIST_GMAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_gmac;
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		/*
		 * Blake2b and Blake2s support an optional key but do
		 * not require one.
		 */
		if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL)
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_ccm_cbc_mac;
		break;
	}

	return (0);
}

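/*
 * For the AEAD modes the cipher key doubles as the MAC key, so the
 * auth transform (GHASH or CBC-MAC) is selected by the cipher key
 * length.
 */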
static int
swcr_setup_gcm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	if (csp->csp_ivlen != AES_GCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_nist_gmac_aes_128;
		break;
	case 192:
		axf = &auth_hash_nist_gmac_aes_192;
		break;
	case 256:
		axf = &auth_hash_nist_gmac_aes_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	return (swcr_setup_cipher(ses, csp));
}

static int
swcr_setup_ccm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	if (csp->csp_ivlen != AES_CCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_ccm_cbc_mac_128;
		break;
	case 192:
		axf = &auth_hash_ccm_cbc_mac_192;
		break;
	case 256:
		axf = &auth_hash_ccm_cbc_mac_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	return (swcr_setup_cipher(ses, csp));
}

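/*
 * Capability checks used by swcr_probesession().  MACs built on a
 * block cipher (GMAC, CCM-CBC-MAC) additionally require the key at
 * session-creation time and a fixed nonce length.
 */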
static bool
swcr_auth_supported(const struct crypto_session_params *csp)
{
	struct auth_hash *axf;

	axf = crypto_auth_hash(csp);
	if (axf == NULL)
		return (false);
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		break;
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_GCM_IV_LEN)
			return (false);
		break;
	case CRYPTO_POLY1305:
		if (csp->csp_auth_klen != POLY1305_KEY_LEN)
			return (false);
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_CCM_IV_LEN)
			return (false);
		break;
	}
	return (true);
}

static bool
swcr_cipher_supported(const struct crypto_session_params *csp)
{
	struct enc_xform *txf;

	txf = crypto_cipher(csp);
	if (txf == NULL)
		return (false);
	if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
	    txf->ivsize != csp->csp_ivlen)
		return (false);
	return (true);
}

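/*
 * Reject algorithm/mode pairings that make no sense: the AEAD
 * ciphers may only be used in AEAD mode, and their MAC halves may
 * not be paired with a cipher in EtA mode.
 */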
static int
swcr_probesession(device_t dev, const struct crypto_session_params *csp)
{

	if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) !=
	    0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		default:
			if (!swcr_cipher_supported(csp))
				return (EINVAL);
			break;
		}
		break;
	case CSP_MODE_DIGEST:
		if (!swcr_auth_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		/* AEAD algorithms cannot be used for EtA. */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			return (EINVAL);
		}

		if (!swcr_cipher_supported(csp) ||
		    !swcr_auth_supported(csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_SOFTWARE);
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct swcr_session *ses;
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct comp_algo *cxf;
	int error;

	ses = crypto_get_driver_session(cses);
	mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);

	error = 0;
	swe = &ses->swcr_encdec;
	swa = &ses->swcr_auth;
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			break;
#ifdef INVARIANTS
		default:
			panic("bad compression algo");
#endif
		}
		ses->swcr_compdec.sw_cxf = cxf;
		ses->swcr_process = swcr_compdec;
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_NULL_CBC:
			ses->swcr_process = swcr_null;
			break;
#ifdef INVARIANTS
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad cipher algo");
#endif
		default:
			error = swcr_setup_cipher(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_encdec;
		}
		break;
	case CSP_MODE_DIGEST:
		error = swcr_setup_auth(ses, csp);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			error = swcr_setup_gcm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_gcm;
			break;
		case CRYPTO_AES_CCM_16:
			error = swcr_setup_ccm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_ccm;
			break;
#ifdef INVARIANTS
		default:
			panic("bad aead algo");
#endif
		}
		break;
	case CSP_MODE_ETA:
#ifdef INVARIANTS
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad eta cipher algo");
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			panic("bad eta auth algo");
		}
#endif

		error = swcr_setup_auth(ses, csp);
		if (error)
			break;
		if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
			/* Effectively degrade to digest mode. */
			ses->swcr_process = swcr_authcompute;
			break;
		}

		error = swcr_setup_cipher(ses, csp);
		if (error == 0)
			ses->swcr_process = swcr_eta;
		break;
	default:
		error = EINVAL;
	}

	if (error)
		swcr_freesession(dev, cses);
	return (error);
}

static void
swcr_freesession(device_t dev, crypto_session_t cses)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(cses);

	mtx_destroy(&ses->swcr_lock);

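	/*
	 * zfree() zeroes the buffers before releasing them so that
	 * key schedules and HMAC pads do not linger in freed memory.
	 */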
	zfree(ses->swcr_encdec.sw_kschedule, M_CRYPTO_DATA);
	zfree(ses->swcr_auth.sw_ictx, M_CRYPTO_DATA);
	zfree(ses->swcr_auth.sw_octx, M_CRYPTO_DATA);
}

/*
 * Process a software request.  The operation is performed
 * synchronously: it runs to completion under the session lock and
 * crypto_done() is called before returning.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(crp->crp_session);
	mtx_lock(&ses->swcr_lock);

	crp->crp_etype = ses->swcr_process(ses, crp);

	mtx_unlock(&ses->swcr_lock);
	crypto_done(crp);
	return (0);
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (BUS_PROBE_NOWILDCARD);
}

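/*
 * Register with the crypto framework as a synchronous, software-only
 * driver: requests are completed inline in swcr_process() rather
 * than queued to a worker thread.
 */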
static int
swcr_attach(device_t dev)
{

	swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return (ENXIO);
	}

	return (0);
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	return (0);
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_probesession, swcr_probesession),
	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);