/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>
#include <sys/mutex.h>

#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

struct swcr_auth {
	void		*sw_ictx;
	void		*sw_octx;
	struct auth_hash *sw_axf;
	uint16_t	sw_mlen;
};

struct swcr_encdec {
	void		*sw_kschedule;
	struct enc_xform *sw_exf;
};

struct swcr_compdec {
	struct comp_algo *sw_cxf;
};

struct swcr_session {
	struct mtx	swcr_lock;
	int	(*swcr_process)(struct swcr_session *, struct cryptop *);

	struct swcr_auth swcr_auth;
	struct swcr_encdec swcr_encdec;
	struct swcr_compdec swcr_compdec;
};
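
/*
 * Each session binds swcr_process to the handler matching its mode at
 * setup time (see swcr_newsession() below), so the per-request path is
 * a single indirect call rather than a per-request switch on the
 * algorithm.
 */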

static	int32_t swcr_id;

static	void swcr_freesession(device_t dev, crypto_session_t cses);

/* Used for CRYPTO_NULL_CBC. */
static int
swcr_null(struct swcr_session *ses, struct cryptop *crp)
{

	return (0);
}

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	const struct crypto_session_params *csp;
	struct swcr_encdec *sw;
	struct enc_xform *exf;
	int i, blks, inlen, ivlen, outlen, resid;
	struct crypto_buffer_cursor cc_in, cc_out;
	const unsigned char *inblk;
	unsigned char *outblk;
	int error;
	bool encrypting;

	error = 0;

	sw = &ses->swcr_encdec;
	exf = sw->sw_exf;
	ivlen = exf->ivsize;

	if (exf->native_blocksize == 0) {
		/* Check for non-padded data */
		if ((crp->crp_payload_length % exf->blocksize) != 0)
			return (EINVAL);

		blks = exf->blocksize;
	} else
		blks = exf->native_blocksize;

	if (exf == &enc_xform_aes_icm &&
	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	if (crp->crp_cipher_key != NULL) {
		csp = crypto_get_params(crp->crp_session);
		error = exf->setkey(sw->sw_kschedule,
		    crp->crp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}

	crypto_read_iv(crp, iv);

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(sw->sw_kschedule, iv);
	}

	ivp = iv;

	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	inlen = crypto_cursor_seglen(&cc_in);
	inblk = crypto_cursor_segbase(&cc_in);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	outlen = crypto_cursor_seglen(&cc_out);
	outblk = crypto_cursor_segbase(&cc_out);

	resid = crp->crp_payload_length;
	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);

	/*
	 * Loop through encrypting blocks.  'inlen' is the remaining
	 * length of the current segment in the input buffer.
	 * 'outlen' is the remaining length of the current segment in
	 * the output buffer.
	 */
	while (resid >= blks) {
		/*
		 * If the current block is not contained within the
		 * current input/output segment, use 'blk' as a local
		 * buffer.
		 */
		if (inlen < blks) {
			crypto_cursor_copydata(&cc_in, blks, blk);
			inblk = blk;
		}
		if (outlen < blks)
			outblk = blk;

		/*
		 * Ciphers without a 'reinit' hook are assumed to be
		 * used in CBC mode where the chaining is done here.
		 */
		if (exf->reinit != NULL) {
			if (encrypting)
				exf->encrypt(sw->sw_kschedule, inblk, outblk);
			else
				exf->decrypt(sw->sw_kschedule, inblk, outblk);
		} else if (encrypting) {
			/* XOR with previous block */
			for (i = 0; i < blks; i++)
				outblk[i] = inblk[i] ^ ivp[i];

			exf->encrypt(sw->sw_kschedule, outblk, outblk);

			/*
			 * Keep encrypted block for XOR'ing
			 * with next block
			 */
			memcpy(iv, outblk, blks);
			ivp = iv;
		} else {	/* decrypt */
			/*
			 * Keep encrypted block for XOR'ing
			 * with next block
			 */
			nivp = (ivp == iv) ? iv2 : iv;
			memcpy(nivp, inblk, blks);

			exf->decrypt(sw->sw_kschedule, inblk, outblk);

			/* XOR with previous block */
			for (i = 0; i < blks; i++)
				outblk[i] ^= ivp[i];

			ivp = nivp;
		}

		if (inlen < blks) {
			inlen = crypto_cursor_seglen(&cc_in);
			inblk = crypto_cursor_segbase(&cc_in);
		} else {
			crypto_cursor_advance(&cc_in, blks);
			inlen -= blks;
			inblk += blks;
		}

		if (outlen < blks) {
			crypto_cursor_copyback(&cc_out, blks, blk);
			outlen = crypto_cursor_seglen(&cc_out);
			outblk = crypto_cursor_segbase(&cc_out);
		} else {
			crypto_cursor_advance(&cc_out, blks);
			outlen -= blks;
			outblk += blks;
		}

		resid -= blks;
	}

	/* Handle trailing partial block for stream ciphers. */
	if (resid > 0) {
		KASSERT(exf->native_blocksize != 0,
		    ("%s: partial block of %d bytes for cipher %s",
		    __func__, resid, exf->name));
		KASSERT(exf->reinit != NULL,
		    ("%s: partial block cipher %s without reinit hook",
		    __func__, exf->name));
		KASSERT(resid < blks, ("%s: partial block too big", __func__));

		inlen = crypto_cursor_seglen(&cc_in);
		outlen = crypto_cursor_seglen(&cc_out);
		if (inlen < resid) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			inblk = blk;
		} else
			inblk = crypto_cursor_segbase(&cc_in);
		if (outlen < resid)
			outblk = blk;
		else
			outblk = crypto_cursor_segbase(&cc_out);
		if (encrypting)
			exf->encrypt_last(sw->sw_kschedule, inblk, outblk,
			    resid);
		else
			exf->decrypt_last(sw->sw_kschedule, inblk, outblk,
			    resid);
		if (outlen < resid)
			crypto_cursor_copyback(&cc_out, resid, blk);
	}

	explicit_bzero(blk, sizeof(blk));
	explicit_bzero(iv, sizeof(iv));
	explicit_bzero(iv2, sizeof(iv2));
	return (0);
}
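
/*
 * A quick reference for the CBC chaining done in the loop above, with
 * C_0 = IV and i the block index:
 *
 *	encrypt:  C_i = E_K(P_i ^ C_{i-1})
 *	decrypt:  P_i = D_K(C_i) ^ C_{i-1}
 *
 * Decryption saves each ciphertext block in 'nivp' before the cipher
 * runs so that the chaining value survives in-place operation, where
 * outblk overwrites inblk.
 */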

static void
swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw,
    const uint8_t *key, int klen)
{

	switch (axf->type) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		hmac_init_ipad(axf, key, klen, sw->sw_ictx);
		hmac_init_opad(axf, key, klen, sw->sw_octx);
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		axf->Setkey(sw->sw_ictx, key, klen);
		axf->Init(sw->sw_ictx);
		break;
	default:
		panic("%s: algorithm %d doesn't use keys", __func__, axf->type);
	}
}
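
/*
 * For reference, hmac_init_ipad() and hmac_init_opad() above precompute
 * the two keyed hash states of
 *
 *	HMAC(K, m) = H((K' ^ opad) || H((K' ^ ipad) || m))
 *
 * where K' is the key padded (or first hashed down) to the hash block
 * size, ipad is the byte 0x36 repeated and opad is 0x5c repeated.
 * Caching both states in sw_ictx/sw_octx avoids reprocessing the key
 * for every request.
 */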

/*
 * Compute or verify hash.
 */
static int
swcr_authcompute(struct swcr_session *ses, struct cryptop *crp)
{
	u_char aalg[HASH_MAX_LEN];
	const struct crypto_session_params *csp;
	struct swcr_auth *sw;
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	sw = &ses->swcr_auth;

	axf = sw->sw_axf;

	csp = crypto_get_params(crp->crp_session);
	if (crp->crp_auth_key != NULL) {
		swcr_authprepare(axf, sw, crp->crp_auth_key,
		    csp->csp_auth_klen);
	}

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	if (crp->crp_aad != NULL)
		err = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
	else
		err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
		    axf->Update, &ctx);
	if (err)
		goto out;

	if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
	    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		err = crypto_apply_buf(&crp->crp_obuf,
		    crp->crp_payload_output_start, crp->crp_payload_length,
		    axf->Update, &ctx);
	else
		err = crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length, axf->Update, &ctx);
	if (err)
		goto out;

	if (csp->csp_flags & CSP_F_ESN)
		axf->Update(&ctx, crp->crp_esn, 4);

	axf->Final(aalg, &ctx);
	if (sw->sw_octx != NULL) {
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char uaalg[HASH_MAX_LEN];

		crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
		if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
			err = EBADMSG;
		explicit_bzero(uaalg, sizeof(uaalg));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
	}
	explicit_bzero(aalg, sizeof(aalg));
out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (err);
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */

static int
swcr_gmac(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[GMAC_DIGEST_LEN];
	u_char iv[AES_BLOCK_LEN];
	struct crypto_buffer_cursor cc;
	const u_char *inblk;
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	uint32_t *blkp;
	int blksz, error, ivlen, len, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = GMAC_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	crypto_read_iv(crp, iv);

	axf->Reinit(&ctx, iv, ivlen);
	crypto_cursor_init(&cc, &crp->crp_buf);
	crypto_cursor_advance(&cc, crp->crp_payload_start);
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= len) {
		len = crypto_cursor_seglen(&cc);
		if (len >= blksz) {
			inblk = crypto_cursor_segbase(&cc);
			len = rounddown(MIN(len, resid), blksz);
			crypto_cursor_advance(&cc, len);
		} else {
			len = blksz;
			crypto_cursor_copydata(&cc, len, blk);
			inblk = blk;
		}
		axf->Update(&ctx, inblk, len);
	}
	if (resid > 0) {
		memset(blk, 0, blksz);
		crypto_cursor_copydata(&cc, resid, blk);
		axf->Update(&ctx, blk, blksz);
	}

	/* length block */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	error = 0;
	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char tag2[GMAC_DIGEST_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);
		if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
		explicit_bzero(tag2, sizeof(tag2));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}
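
/*
 * A note on the length block built above: GHASH terminates with a
 * single block carrying len(AAD) and len(ciphertext) as 64-bit
 * big-endian bit counts.  GMAC authenticates the payload as AAD and
 * produces no ciphertext, so only the AAD count is filled in; its low
 * 32 bits land in 32-bit word 1 of the block, which is what the
 * "(uint32_t *)blk + 1" arithmetic selects.
 */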

static int
swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[GMAC_DIGEST_LEN];
	u_char iv[AES_BLOCK_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	uint32_t *blkp;
	int blksz, error, ivlen, len, r, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = GMAC_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	KASSERT(axf->blocksize == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	if (crp->crp_aad != NULL) {
		len = rounddown(crp->crp_aad_length, blksz);
		if (len != 0)
			axf->Update(&ctx, crp->crp_aad, len);
		if (crp->crp_aad_length != len) {
			memset(blk, 0, blksz);
			memcpy(blk, (char *)crp->crp_aad + len,
			    crp->crp_aad_length - len);
			axf->Update(&ctx, blk, blksz);
		}
	} else {
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_aad_start);
		for (resid = crp->crp_aad_length; resid >= blksz;
		     resid -= len) {
			len = crypto_cursor_seglen(&cc_in);
			if (len >= blksz) {
				inblk = crypto_cursor_segbase(&cc_in);
				len = rounddown(MIN(len, resid), blksz);
				crypto_cursor_advance(&cc_in, len);
			} else {
				len = blksz;
				crypto_cursor_copydata(&cc_in, len, blk);
				inblk = blk;
			}
			axf->Update(&ctx, inblk, len);
		}
		if (resid > 0) {
			memset(blk, 0, blksz);
			crypto_cursor_copydata(&cc_in, resid, blk);
			axf->Update(&ctx, blk, blksz);
		}
	}

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		if (crypto_cursor_seglen(&cc_in) < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else {
			inblk = crypto_cursor_segbase(&cc_in);
			crypto_cursor_advance(&cc_in, blksz);
		}
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			exf->encrypt(swe->sw_kschedule, inblk, outblk);
			axf->Update(&ctx, outblk, blksz);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			axf->Update(&ctx, inblk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
		axf->Update(&ctx, blk, resid);
	}

	/* length block */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_aad_length * 8);
	blkp = (uint32_t *)blk + 3;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		u_char tag2[GMAC_DIGEST_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2);

		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
		explicit_bzero(tag2, sizeof(tag2));
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		     resid -= blksz) {
			if (crypto_cursor_seglen(&cc_in) < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else {
				inblk = crypto_cursor_segbase(&cc_in);
				crypto_cursor_advance(&cc_in, blksz);
			}
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			exf->decrypt(swe->sw_kschedule, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}

out:
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));

	return (error);
}
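
/*
 * A note on the decrypt path above: GHASH runs over the ciphertext, so
 * encryption can authenticate each block as it is produced, while
 * decryption is two-pass: the first pass MACs the ciphertext and
 * verifies the tag, and only on a match does a second pass decrypt.
 * No plaintext is ever written out for a request with a forged tag.
 */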

static int
swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
{
	u_char tag[AES_CBC_MAC_HASH_LEN];
	u_char iv[AES_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	int error, ivlen;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	crypto_read_iv(crp, iv);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = 0;

	axf->Reinit(&ctx, iv, ivlen);
	if (crp->crp_aad != NULL)
		error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
	else
		error = crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length, axf->Update, &ctx);
	if (error)
		return (error);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char tag2[AES_CBC_MAC_HASH_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);
		if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
		explicit_bzero(tag2, sizeof(tag2));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}

static int
swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[AES_CBC_MAC_HASH_LEN];
	u_char iv[AES_BLOCK_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	int blksz, error, ivlen, r, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = AES_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	KASSERT(axf->blocksize == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	if (crp->crp_aad != NULL)
		error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
	else
		error = crypto_apply(crp, crp->crp_aad_start,
		    crp->crp_aad_length, axf->Update, &ctx);
	if (error)
		return (error);

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption/decryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		if (crypto_cursor_seglen(&cc_in) < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else {
			inblk = crypto_cursor_segbase(&cc_in);
			crypto_cursor_advance(&cc_in, blksz);
		}
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			axf->Update(&ctx, inblk, blksz);
			exf->encrypt(swe->sw_kschedule, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			/*
			 * One of the problems with CCM+CBC is that
			 * the authentication is done on the
			 * unencrypted data.  As a result, we have to
			 * decrypt the data twice: once to generate
			 * the tag and a second time after the tag is
			 * verified.
			 */
			exf->decrypt(swe->sw_kschedule, inblk, blk);
			axf->Update(&ctx, blk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			axf->Update(&ctx, blk, resid);
			exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		} else {
			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
			axf->Update(&ctx, blk, resid);
		}
	}

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		u_char tag2[AES_CBC_MAC_HASH_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);

		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
		explicit_bzero(tag2, sizeof(tag2));
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		exf->reinit(swe->sw_kschedule, iv);
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		     resid -= blksz) {
			if (crypto_cursor_seglen(&cc_in) < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else {
				inblk = crypto_cursor_segbase(&cc_in);
				crypto_cursor_advance(&cc_in, blksz);
			}
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			exf->decrypt(swe->sw_kschedule, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}

out:
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}
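
/*
 * CCM is the mirror image of GCM above: the CBC-MAC is computed over
 * the plaintext, so encryption MACs each block before encrypting it,
 * while decryption ends up decrypting twice: once to feed the MAC for
 * tag verification and, after the tag checks out, a second time to
 * write the final output.
 */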

/*
 * Apply a cipher and a digest to perform EtA.
 */
static int
swcr_eta(struct swcr_session *ses, struct cryptop *crp)
{
	int error;

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		error = swcr_encdec(ses, crp);
		if (error == 0)
			error = swcr_authcompute(ses, crp);
	} else {
		error = swcr_authcompute(ses, crp);
		if (error == 0)
			error = swcr_encdec(ses, crp);
	}
	return (error);
}

/*
 * Apply a compression/decompression algorithm.
 */
static int
swcr_compdec(struct swcr_session *ses, struct cryptop *crp)
{
	uint8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	uint32_t result;

	cxf = ses->swcr_compdec.sw_cxf;

	/*
	 * We must handle the whole buffer of data at once, so if the
	 * payload is not contiguous in the mbuf, copy it into a local
	 * buffer first.
	 */
	data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
	    data);

	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
		result = cxf->compress(data, crp->crp_payload_length, &out);
	else
		result = cxf->decompress(data, crp->crp_payload_length, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return (EINVAL);
	crp->crp_olen = result;

	/* Check the compressed size when doing compression */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
		if (result >= crp->crp_payload_length) {
			/* Compression was useless; leave the data as-is. */
			free(out, M_CRYPTO_DATA);
			return (0);
		}
	}

	/*
	 * Copy back the (de)compressed data.  m_copyback will extend
	 * the mbuf as necessary.
	 */
	crypto_copyback(crp, crp->crp_payload_start, result, out);
	if (result < crp->crp_payload_length) {
		switch (crp->crp_buf.cb_type) {
		case CRYPTO_BUF_MBUF:
			adj = result - crp->crp_payload_length;
			m_adj(crp->crp_buf.cb_mbuf, adj);
			break;
		case CRYPTO_BUF_UIO: {
			struct uio *uio = crp->crp_buf.cb_uio;
			int ind;

			adj = crp->crp_payload_length - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
			}
			break;
		case CRYPTO_BUF_VMPAGE:
			adj = crp->crp_payload_length - result;
			crp->crp_buf.cb_vm_page_len -= adj;
			break;
		default:
			break;
		}
	}
	free(out, M_CRYPTO_DATA);
	return (0);
}

static int
swcr_setup_cipher(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	struct enc_xform *txf;
	int error;

	swe = &ses->swcr_encdec;
	txf = crypto_cipher(csp);
	MPASS(txf->ivsize == csp->csp_ivlen);
	if (txf->ctxsize != 0) {
		swe->sw_kschedule = malloc(txf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swe->sw_kschedule == NULL)
			return (ENOMEM);
	}
	if (csp->csp_cipher_key != NULL) {
		error = txf->setkey(swe->sw_kschedule,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;
	return (0);
}

static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	swa = &ses->swcr_auth;

	axf = crypto_auth_hash(csp);
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_octx == NULL)
			return (ENOBUFS);

		if (csp->csp_auth_key != NULL) {
			swcr_authprepare(axf, swa, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}

		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_NIST_GMAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_gmac;
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		/*
		 * Blake2b and Blake2s support an optional key but do
		 * not require one.
		 */
		if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL)
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_ccm_cbc_mac;
		break;
	}

	return (0);
}

static int
swcr_setup_gcm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	if (csp->csp_ivlen != AES_GCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_nist_gmac_aes_128;
		break;
	case 192:
		axf = &auth_hash_nist_gmac_aes_192;
		break;
	case 256:
		axf = &auth_hash_nist_gmac_aes_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	return (swcr_setup_cipher(ses, csp));
}

static int
swcr_setup_ccm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	if (csp->csp_ivlen != AES_CCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_ccm_cbc_mac_128;
		break;
	case 192:
		axf = &auth_hash_ccm_cbc_mac_192;
		break;
	case 256:
		axf = &auth_hash_ccm_cbc_mac_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	return (swcr_setup_cipher(ses, csp));
}

static bool
swcr_auth_supported(const struct crypto_session_params *csp)
{
	struct auth_hash *axf;

	axf = crypto_auth_hash(csp);
	if (axf == NULL)
		return (false);
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		break;
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_GCM_IV_LEN)
			return (false);
		break;
	case CRYPTO_POLY1305:
		if (csp->csp_auth_klen != POLY1305_KEY_LEN)
			return (false);
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_CCM_IV_LEN)
			return (false);
		break;
	}
	return (true);
}

static bool
swcr_cipher_supported(const struct crypto_session_params *csp)
{
	struct enc_xform *txf;

	txf = crypto_cipher(csp);
	if (txf == NULL)
		return (false);
	if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
	    txf->ivsize != csp->csp_ivlen)
		return (false);
	return (true);
}

#define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)

static int
swcr_probesession(device_t dev, const struct crypto_session_params *csp)
{
	if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		default:
			if (!swcr_cipher_supported(csp))
				return (EINVAL);
			break;
		}
		break;
	case CSP_MODE_DIGEST:
		if (!swcr_auth_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		/* AEAD algorithms cannot be used for EtA. */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			return (EINVAL);
		}

		if (!swcr_cipher_supported(csp) ||
		    !swcr_auth_supported(csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_SOFTWARE);
}
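
/*
 * CRYPTODEV_PROBE_SOFTWARE is the least-preferred probe value, so a
 * hardware or accelerated-software driver that claims the same session
 * parameters is chosen ahead of cryptosoft.
 */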

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct swcr_session *ses;
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct comp_algo *cxf;
	int error;

	ses = crypto_get_driver_session(cses);
	mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);

	error = 0;
	swe = &ses->swcr_encdec;
	swa = &ses->swcr_auth;
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			break;
#ifdef INVARIANTS
		default:
			panic("bad compression algo");
#endif
		}
		ses->swcr_compdec.sw_cxf = cxf;
		ses->swcr_process = swcr_compdec;
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_NULL_CBC:
			ses->swcr_process = swcr_null;
			break;
#ifdef INVARIANTS
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad cipher algo");
#endif
		default:
			error = swcr_setup_cipher(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_encdec;
		}
		break;
	case CSP_MODE_DIGEST:
		error = swcr_setup_auth(ses, csp);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			error = swcr_setup_gcm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_gcm;
			break;
		case CRYPTO_AES_CCM_16:
			error = swcr_setup_ccm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_ccm;
			break;
#ifdef INVARIANTS
		default:
			panic("bad aead algo");
#endif
		}
		break;
	case CSP_MODE_ETA:
#ifdef INVARIANTS
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad eta cipher algo");
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			panic("bad eta auth algo");
		}
#endif

		error = swcr_setup_auth(ses, csp);
		if (error)
			break;
		if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
			/* Effectively degrade to digest mode. */
			ses->swcr_process = swcr_authcompute;
			break;
		}

		error = swcr_setup_cipher(ses, csp);
		if (error == 0)
			ses->swcr_process = swcr_eta;
		break;
	default:
		error = EINVAL;
	}

	if (error)
		swcr_freesession(dev, cses);
	return (error);
}

static void
swcr_freesession(device_t dev, crypto_session_t cses)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(cses);

	mtx_destroy(&ses->swcr_lock);

	zfree(ses->swcr_encdec.sw_kschedule, M_CRYPTO_DATA);
	zfree(ses->swcr_auth.sw_ictx, M_CRYPTO_DATA);
	zfree(ses->swcr_auth.sw_octx, M_CRYPTO_DATA);
}
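
/*
 * zfree() zeroes an allocation before freeing it, so key schedules and
 * precomputed HMAC states do not linger in freed kernel memory.
 */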

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(crp->crp_session);
	mtx_lock(&ses->swcr_lock);

	crp->crp_etype = ses->swcr_process(ses, crp);

	mtx_unlock(&ses->swcr_lock);
	crypto_done(crp);
	return (0);
}
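
/*
 * For orientation, a minimal sketch of how an in-kernel consumer would
 * reach swcr_process() (field names from the OCF API; keying, buffer
 * setup and error handling elided, and the callback is hypothetical):
 *
 *	struct crypto_session_params csp = {
 *		.csp_mode = CSP_MODE_CIPHER,
 *		.csp_cipher_alg = CRYPTO_AES_CBC,
 *		.csp_cipher_klen = 32,
 *		.csp_ivlen = 16,
 *		.csp_cipher_key = key,
 *	};
 *	crypto_session_t cses;
 *	struct cryptop *crp;
 *
 *	crypto_newsession(&cses, &csp, CRYPTOCAP_F_SOFTWARE);
 *	crp = crypto_getreq(cses, M_WAITOK);
 *	crp->crp_op = CRYPTO_OP_ENCRYPT;
 *	crp->crp_callback = my_callback;	(hypothetical)
 *	... describe the buffer, payload range and IV in crp ...
 *	crypto_dispatch(crp);
 *
 * Because this driver registers with CRYPTOCAP_F_SYNC, swcr_process()
 * completes the request and calls crypto_done() before returning.
 */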

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{

	swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return (ENXIO);
	}

	return (0);
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	return (0);
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_probesession, swcr_probesession),
	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);