xref: /freebsd/sys/opencrypto/cryptosoft.c (revision dd41de95a84d979615a2ef11df6850622bf6184e)
1 /*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/
2 
3 /*-
4  * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
5  * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
6  *
7  * This code was written by Angelos D. Keromytis in Athens, Greece, in
8  * February 2000. Network Security Technologies Inc. (NSTI) kindly
9  * supported the development of this code.
10  *
11  * Copyright (c) 2000, 2001 Angelos D. Keromytis
12  * Copyright (c) 2014 The FreeBSD Foundation
13  * All rights reserved.
14  *
15  * Portions of this software were developed by John-Mark Gurney
16  * under sponsorship of the FreeBSD Foundation and
17  * Rubicon Communications, LLC (Netgate).
18  *
19  * Permission to use, copy, and modify this software with or without fee
20  * is hereby granted, provided that this entire notice is included in
21  * all source code copies of any software which is or includes a copy or
22  * modification of this software.
23  *
24  * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
25  * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
26  * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
27  * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
28  * PURPOSE.
29  */
30 
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33 
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/mbuf.h>
38 #include <sys/module.h>
39 #include <sys/sysctl.h>
40 #include <sys/errno.h>
41 #include <sys/random.h>
42 #include <sys/kernel.h>
43 #include <sys/uio.h>
44 #include <sys/lock.h>
45 #include <sys/rwlock.h>
46 #include <sys/endian.h>
47 #include <sys/limits.h>
48 #include <sys/mutex.h>
49 
50 #include <crypto/sha1.h>
51 #include <opencrypto/rmd160.h>
52 
53 #include <opencrypto/cryptodev.h>
54 #include <opencrypto/xform.h>
55 
56 #include <sys/kobj.h>
57 #include <sys/bus.h>
58 #include "cryptodev_if.h"
59 
60 struct swcr_auth {
61 	void		*sw_ictx;
62 	void		*sw_octx;
63 	struct auth_hash *sw_axf;
64 	uint16_t	sw_mlen;
65 };
66 
67 struct swcr_encdec {
68 	void		*sw_kschedule;
69 	struct enc_xform *sw_exf;
70 };
71 
72 struct swcr_compdec {
73 	struct comp_algo *sw_cxf;
74 };
75 
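/*
 * Per-session state for the software driver: the request handler
 * selected at session setup, the precomputed auth contexts and cipher
 * key schedule, and a lock that serializes requests on the session.
 */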
76 struct swcr_session {
77 	struct mtx	swcr_lock;
78 	int	(*swcr_process)(struct swcr_session *, struct cryptop *);
79 
80 	struct swcr_auth swcr_auth;
81 	struct swcr_encdec swcr_encdec;
82 	struct swcr_compdec swcr_compdec;
83 };
84 
85 static	int32_t swcr_id;
86 
87 static	void swcr_freesession(device_t dev, crypto_session_t cses);
88 
89 /* Used for CRYPTO_NULL_CBC. */
90 static int
91 swcr_null(struct swcr_session *ses, struct cryptop *crp)
92 {
93 
94 	return (0);
95 }
96 
97 /*
98  * Apply a symmetric encryption/decryption algorithm.
99  */
100 static int
101 swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
102 {
103 	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
104 	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
105 	const struct crypto_session_params *csp;
106 	struct swcr_encdec *sw;
107 	struct enc_xform *exf;
108 	size_t inlen, outlen;
109 	int i, blks, ivlen, resid;
110 	struct crypto_buffer_cursor cc_in, cc_out;
111 	const unsigned char *inblk;
112 	unsigned char *outblk;
113 	int error;
114 	bool encrypting;
115 
116 	error = 0;
117 
118 	sw = &ses->swcr_encdec;
119 	exf = sw->sw_exf;
120 	ivlen = exf->ivsize;
121 
122 	if (exf->native_blocksize == 0) {
123 		/* Check for non-padded data */
124 		if ((crp->crp_payload_length % exf->blocksize) != 0)
125 			return (EINVAL);
126 
127 		blks = exf->blocksize;
128 	} else
129 		blks = exf->native_blocksize;
130 
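	/*
	 * AES-ICM (counter mode) only supports an IV that is supplied
	 * separately with the request.
	 */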
131 	if (exf == &enc_xform_aes_icm &&
132 	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
133 		return (EINVAL);
134 
135 	if (crp->crp_cipher_key != NULL) {
136 		csp = crypto_get_params(crp->crp_session);
137 		error = exf->setkey(sw->sw_kschedule,
138 		    crp->crp_cipher_key, csp->csp_cipher_klen);
139 		if (error)
140 			return (error);
141 	}
142 
143 	crypto_read_iv(crp, iv);
144 
145 	if (exf->reinit) {
146 		/*
147 		 * xforms that provide a reinit method perform all IV
148 		 * handling themselves.
149 		 */
150 		exf->reinit(sw->sw_kschedule, iv);
151 	}
152 
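	/*
	 * For CBC-style chaining two IV buffers are kept: 'iv' and
	 * 'iv2'.  During decryption the previous ciphertext block must
	 * survive until the current block has been decrypted, even when
	 * the operation is done in place, so the saved block alternates
	 * between the two buffers and 'ivp' always points at the one
	 * holding the previous block.
	 */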
153 	ivp = iv;
154 
155 	crypto_cursor_init(&cc_in, &crp->crp_buf);
156 	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
157 	inblk = crypto_cursor_segment(&cc_in, &inlen);
158 	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
159 		crypto_cursor_init(&cc_out, &crp->crp_obuf);
160 		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
161 	} else
162 		cc_out = cc_in;
163 	outblk = crypto_cursor_segment(&cc_out, &outlen);
164 
165 	resid = crp->crp_payload_length;
166 	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);
167 
168 	/*
169 	 * Loop through encrypting blocks.  'inlen' is the remaining
170 	 * length of the current segment in the input buffer.
171 	 * 'outlen' is the remaining length of current segment in the
172 	 * output buffer.
173 	 */
174 	while (resid >= blks) {
175 		/*
176 		 * If the current block is not contained within the
177 		 * current input/output segment, use 'blk' as a local
178 		 * buffer.
179 		 */
180 		if (inlen < blks) {
181 			crypto_cursor_copydata(&cc_in, blks, blk);
182 			inblk = blk;
183 		}
184 		if (outlen < blks)
185 			outblk = blk;
186 
187 		/*
188 		 * Ciphers without a 'reinit' hook are assumed to be
189 		 * used in CBC mode where the chaining is done here.
190 		 */
191 		if (exf->reinit != NULL) {
192 			if (encrypting)
193 				exf->encrypt(sw->sw_kschedule, inblk, outblk);
194 			else
195 				exf->decrypt(sw->sw_kschedule, inblk, outblk);
196 		} else if (encrypting) {
197 			/* XOR with previous block */
198 			for (i = 0; i < blks; i++)
199 				outblk[i] = inblk[i] ^ ivp[i];
200 
201 			exf->encrypt(sw->sw_kschedule, outblk, outblk);
202 
203 			/*
204 			 * Keep encrypted block for XOR'ing
205 			 * with next block
206 			 */
207 			memcpy(iv, outblk, blks);
208 			ivp = iv;
209 		} else {	/* decrypt */
210 			/*
211 			 * Keep encrypted block for XOR'ing
212 			 * with next block
213 			 */
214 			nivp = (ivp == iv) ? iv2 : iv;
215 			memcpy(nivp, inblk, blks);
216 
217 			exf->decrypt(sw->sw_kschedule, inblk, outblk);
218 
219 			/* XOR with previous block */
220 			for (i = 0; i < blks; i++)
221 				outblk[i] ^= ivp[i];
222 
223 			ivp = nivp;
224 		}
225 
226 		if (inlen < blks) {
227 			inblk = crypto_cursor_segment(&cc_in, &inlen);
228 		} else {
229 			crypto_cursor_advance(&cc_in, blks);
230 			inlen -= blks;
231 			inblk += blks;
232 		}
233 
234 		if (outlen < blks) {
235 			crypto_cursor_copyback(&cc_out, blks, blk);
236 			outblk = crypto_cursor_segment(&cc_out, &outlen);
237 		} else {
238 			crypto_cursor_advance(&cc_out, blks);
239 			outlen -= blks;
240 			outblk += blks;
241 		}
242 
243 		resid -= blks;
244 	}
245 
246 	/* Handle trailing partial block for stream ciphers. */
247 	if (resid > 0) {
248 		KASSERT(exf->native_blocksize != 0,
249 		    ("%s: partial block of %d bytes for cipher %s",
250 		    __func__, resid, exf->name));
251 		KASSERT(exf->reinit != NULL,
252 		    ("%s: partial block cipher %s without reinit hook",
253 		    __func__, exf->name));
254 		KASSERT(resid < blks, ("%s: partial block too big", __func__));
255 
256 		inblk = crypto_cursor_segment(&cc_in, &inlen);
257 		outblk = crypto_cursor_segment(&cc_out, &outlen);
258 		if (inlen < resid) {
259 			crypto_cursor_copydata(&cc_in, resid, blk);
260 			inblk = blk;
261 		}
262 		if (outlen < resid)
263 			outblk = blk;
264 		if (encrypting)
265 			exf->encrypt_last(sw->sw_kschedule, inblk, outblk,
266 			    resid);
267 		else
268 			exf->decrypt_last(sw->sw_kschedule, inblk, outblk,
269 			    resid);
270 		if (outlen < resid)
271 			crypto_cursor_copyback(&cc_out, resid, blk);
272 	}
273 
274 	explicit_bzero(blk, sizeof(blk));
275 	explicit_bzero(iv, sizeof(iv));
276 	explicit_bzero(iv2, sizeof(iv2));
277 	return (0);
278 }
279 
280 static void
281 swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw,
282     const uint8_t *key, int klen)
283 {
284 
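	/*
	 * HMAC variants fold the key into precomputed inner (ipad) and
	 * outer (opad) hash contexts so that per-request processing
	 * only has to resume from those contexts.
	 */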
285 	switch (axf->type) {
286 	case CRYPTO_SHA1_HMAC:
287 	case CRYPTO_SHA2_224_HMAC:
288 	case CRYPTO_SHA2_256_HMAC:
289 	case CRYPTO_SHA2_384_HMAC:
290 	case CRYPTO_SHA2_512_HMAC:
291 	case CRYPTO_NULL_HMAC:
292 	case CRYPTO_RIPEMD160_HMAC:
293 		hmac_init_ipad(axf, key, klen, sw->sw_ictx);
294 		hmac_init_opad(axf, key, klen, sw->sw_octx);
295 		break;
296 	case CRYPTO_POLY1305:
297 	case CRYPTO_BLAKE2B:
298 	case CRYPTO_BLAKE2S:
299 		axf->Setkey(sw->sw_ictx, key, klen);
300 		axf->Init(sw->sw_ictx);
301 		break;
302 	default:
303 		panic("%s: algorithm %d doesn't use keys", __func__, axf->type);
304 	}
305 }
306 
307 /*
308  * Compute or verify hash.
309  */
310 static int
311 swcr_authcompute(struct swcr_session *ses, struct cryptop *crp)
312 {
313 	u_char aalg[HASH_MAX_LEN];
314 	const struct crypto_session_params *csp;
315 	struct swcr_auth *sw;
316 	struct auth_hash *axf;
317 	union authctx ctx;
318 	int err;
319 
320 	sw = &ses->swcr_auth;
321 
322 	axf = sw->sw_axf;
323 
324 	csp = crypto_get_params(crp->crp_session);
325 	if (crp->crp_auth_key != NULL) {
326 		swcr_authprepare(axf, sw, crp->crp_auth_key,
327 		    csp->csp_auth_klen);
328 	}
329 
330 	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);
331 
332 	if (crp->crp_aad != NULL)
333 		err = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
334 	else
335 		err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
336 		    axf->Update, &ctx);
337 	if (err)
338 		goto out;
339 
340 	if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
341 	    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
342 		err = crypto_apply_buf(&crp->crp_obuf,
343 		    crp->crp_payload_output_start, crp->crp_payload_length,
344 		    axf->Update, &ctx);
345 	else
346 		err = crypto_apply(crp, crp->crp_payload_start,
347 		    crp->crp_payload_length, axf->Update, &ctx);
348 	if (err)
349 		goto out;
350 
351 	if (csp->csp_flags & CSP_F_ESN)
352 		axf->Update(&ctx, crp->crp_esn, 4);
353 
354 	axf->Final(aalg, &ctx);
355 	if (sw->sw_octx != NULL) {
356 		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
357 		axf->Update(&ctx, aalg, axf->hashsize);
358 		axf->Final(aalg, &ctx);
359 	}
360 
361 	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
362 		u_char uaalg[HASH_MAX_LEN];
363 
364 		crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
365 		if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
366 			err = EBADMSG;
367 		explicit_bzero(uaalg, sizeof(uaalg));
368 	} else {
369 		/* Inject the authentication data */
370 		crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
371 	}
372 	explicit_bzero(aalg, sizeof(aalg));
373 out:
374 	explicit_bzero(&ctx, sizeof(ctx));
375 	return (err);
376 }
377 
378 CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
379 CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */
380 
381 static int
382 swcr_gmac(struct swcr_session *ses, struct cryptop *crp)
383 {
384 	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
385 	u_char *blk = (u_char *)blkbuf;
386 	u_char tag[GMAC_DIGEST_LEN];
387 	u_char iv[AES_BLOCK_LEN];
388 	struct crypto_buffer_cursor cc;
389 	const u_char *inblk;
390 	union authctx ctx;
391 	struct swcr_auth *swa;
392 	struct auth_hash *axf;
393 	uint32_t *blkp;
394 	size_t len;
395 	int blksz, error, ivlen, resid;
396 
397 	swa = &ses->swcr_auth;
398 	axf = swa->sw_axf;
399 
400 	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
401 	blksz = GMAC_BLOCK_LEN;
402 	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
403 	    __func__));
404 
405 	/* Initialize the IV */
406 	ivlen = AES_GCM_IV_LEN;
407 	crypto_read_iv(crp, iv);
408 
409 	axf->Reinit(&ctx, iv, ivlen);
410 	crypto_cursor_init(&cc, &crp->crp_buf);
411 	crypto_cursor_advance(&cc, crp->crp_payload_start);
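	/*
	 * Feed the payload to the MAC one contiguous run at a time,
	 * rounding each run down to a multiple of the block size and
	 * bouncing any block that straddles segments through 'blk'.
	 */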
412 	for (resid = crp->crp_payload_length; resid >= blksz; resid -= len) {
413 		inblk = crypto_cursor_segment(&cc, &len);
414 		if (len >= blksz) {
415 			len = rounddown(MIN(len, resid), blksz);
416 			crypto_cursor_advance(&cc, len);
417 		} else {
418 			len = blksz;
419 			crypto_cursor_copydata(&cc, len, blk);
420 			inblk = blk;
421 		}
422 		axf->Update(&ctx, inblk, len);
423 	}
424 	if (resid > 0) {
425 		memset(blk, 0, blksz);
426 		crypto_cursor_copydata(&cc, resid, blk);
427 		axf->Update(&ctx, blk, blksz);
428 	}
429 
430 	/* Length block: payload bit count in the AAD-length field; no ciphertext for GMAC. */
431 	memset(blk, 0, blksz);
432 	blkp = (uint32_t *)blk + 1;
433 	*blkp = htobe32(crp->crp_payload_length * 8);
434 	axf->Update(&ctx, blk, blksz);
435 
436 	/* Finalize MAC */
437 	axf->Final(tag, &ctx);
438 
439 	error = 0;
440 	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
441 		u_char tag2[GMAC_DIGEST_LEN];
442 
443 		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
444 		    tag2);
445 		if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
446 			error = EBADMSG;
447 		explicit_bzero(tag2, sizeof(tag2));
448 	} else {
449 		/* Inject the authentication data */
450 		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
451 	}
452 	explicit_bzero(blkbuf, sizeof(blkbuf));
453 	explicit_bzero(tag, sizeof(tag));
454 	explicit_bzero(iv, sizeof(iv));
455 	return (error);
456 }
457 
458 static int
459 swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
460 {
461 	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
462 	u_char *blk = (u_char *)blkbuf;
463 	u_char tag[GMAC_DIGEST_LEN];
464 	u_char iv[AES_BLOCK_LEN];
465 	struct crypto_buffer_cursor cc_in, cc_out;
466 	const u_char *inblk;
467 	u_char *outblk;
468 	union authctx ctx;
469 	struct swcr_auth *swa;
470 	struct swcr_encdec *swe;
471 	struct auth_hash *axf;
472 	struct enc_xform *exf;
473 	uint32_t *blkp;
474 	size_t len;
475 	int blksz, error, ivlen, r, resid;
476 
477 	swa = &ses->swcr_auth;
478 	axf = swa->sw_axf;
479 
480 	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
481 	blksz = GMAC_BLOCK_LEN;
482 	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
483 	    __func__));
484 
485 	swe = &ses->swcr_encdec;
486 	exf = swe->sw_exf;
487 	KASSERT(axf->blocksize == exf->native_blocksize,
488 	    ("%s: blocksize mismatch", __func__));
489 
490 	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
491 		return (EINVAL);
492 
493 	/* Initialize the IV */
494 	ivlen = AES_GCM_IV_LEN;
495 	bcopy(crp->crp_iv, iv, ivlen);
496 
497 	/* Supply MAC with IV */
498 	axf->Reinit(&ctx, iv, ivlen);
499 
500 	/* Supply MAC with AAD */
501 	if (crp->crp_aad != NULL) {
502 		len = rounddown(crp->crp_aad_length, blksz);
503 		if (len != 0)
504 			axf->Update(&ctx, crp->crp_aad, len);
505 		if (crp->crp_aad_length != len) {
506 			memset(blk, 0, blksz);
507 			memcpy(blk, (char *)crp->crp_aad + len,
508 			    crp->crp_aad_length - len);
509 			axf->Update(&ctx, blk, blksz);
510 		}
511 	} else {
512 		crypto_cursor_init(&cc_in, &crp->crp_buf);
513 		crypto_cursor_advance(&cc_in, crp->crp_aad_start);
514 		for (resid = crp->crp_aad_length; resid >= blksz;
515 		     resid -= len) {
516 			inblk = crypto_cursor_segment(&cc_in, &len);
517 			if (len >= blksz) {
518 				len = rounddown(MIN(len, resid), blksz);
519 				crypto_cursor_advance(&cc_in, len);
520 			} else {
521 				len = blksz;
522 				crypto_cursor_copydata(&cc_in, len, blk);
523 				inblk = blk;
524 			}
525 			axf->Update(&ctx, inblk, len);
526 		}
527 		if (resid > 0) {
528 			memset(blk, 0, blksz);
529 			crypto_cursor_copydata(&cc_in, resid, blk);
530 			axf->Update(&ctx, blk, blksz);
531 		}
532 	}
533 
534 	if (crp->crp_cipher_key != NULL)
535 		exf->setkey(swe->sw_kschedule, crp->crp_cipher_key,
536 		    crypto_get_params(crp->crp_session)->csp_cipher_klen);
537 	exf->reinit(swe->sw_kschedule, iv);
538 
539 	/* Do encryption with MAC */
540 	crypto_cursor_init(&cc_in, &crp->crp_buf);
541 	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
542 	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
543 		crypto_cursor_init(&cc_out, &crp->crp_obuf);
544 		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
545 	} else
546 		cc_out = cc_in;
547 	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
548 		inblk = crypto_cursor_segment(&cc_in, &len);
549 		if (len < blksz) {
550 			crypto_cursor_copydata(&cc_in, blksz, blk);
551 			inblk = blk;
552 		} else {
553 			crypto_cursor_advance(&cc_in, blksz);
554 		}
555 		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
556 			outblk = crypto_cursor_segment(&cc_out, &len);
557 			if (len < blksz)
558 				outblk = blk;
559 			exf->encrypt(swe->sw_kschedule, inblk, outblk);
560 			axf->Update(&ctx, outblk, blksz);
561 			if (outblk == blk)
562 				crypto_cursor_copyback(&cc_out, blksz, blk);
563 			else
564 				crypto_cursor_advance(&cc_out, blksz);
565 		} else {
566 			axf->Update(&ctx, inblk, blksz);
567 		}
568 	}
569 	if (resid > 0) {
570 		crypto_cursor_copydata(&cc_in, resid, blk);
571 		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
572 			exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
573 			crypto_cursor_copyback(&cc_out, resid, blk);
574 		}
575 		axf->Update(&ctx, blk, resid);
576 	}
577 
578 	/* Length block: 64-bit big-endian bit counts of the AAD and ciphertext. */
579 	memset(blk, 0, blksz);
580 	blkp = (uint32_t *)blk + 1;
581 	*blkp = htobe32(crp->crp_aad_length * 8);
582 	blkp = (uint32_t *)blk + 3;
583 	*blkp = htobe32(crp->crp_payload_length * 8);
584 	axf->Update(&ctx, blk, blksz);
585 
586 	/* Finalize MAC */
587 	axf->Final(tag, &ctx);
588 
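	/*
	 * For decryption the payload was only MAC'd above; the actual
	 * decryption is deferred until the tag has been verified so
	 * that unauthenticated plaintext is never produced.
	 */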
589 	/* Validate tag */
590 	error = 0;
591 	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
592 		u_char tag2[GMAC_DIGEST_LEN];
593 
594 		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2);
595 
596 		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
597 		explicit_bzero(tag2, sizeof(tag2));
598 		if (r != 0) {
599 			error = EBADMSG;
600 			goto out;
601 		}
602 
603 		/* tag matches, decrypt data */
604 		crypto_cursor_init(&cc_in, &crp->crp_buf);
605 		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
606 		for (resid = crp->crp_payload_length; resid > blksz;
607 		     resid -= blksz) {
608 			inblk = crypto_cursor_segment(&cc_in, &len);
609 			if (len < blksz) {
610 				crypto_cursor_copydata(&cc_in, blksz, blk);
611 				inblk = blk;
612 			} else
613 				crypto_cursor_advance(&cc_in, blksz);
614 			outblk = crypto_cursor_segment(&cc_out, &len);
615 			if (len < blksz)
616 				outblk = blk;
617 			exf->decrypt(swe->sw_kschedule, inblk, outblk);
618 			if (outblk == blk)
619 				crypto_cursor_copyback(&cc_out, blksz, blk);
620 			else
621 				crypto_cursor_advance(&cc_out, blksz);
622 		}
623 		if (resid > 0) {
624 			crypto_cursor_copydata(&cc_in, resid, blk);
625 			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
626 			crypto_cursor_copyback(&cc_out, resid, blk);
627 		}
628 	} else {
629 		/* Inject the authentication data */
630 		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
631 	}
632 
633 out:
634 	explicit_bzero(blkbuf, sizeof(blkbuf));
635 	explicit_bzero(tag, sizeof(tag));
636 	explicit_bzero(iv, sizeof(iv));
637 
638 	return (error);
639 }
640 
641 static int
642 swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
643 {
644 	u_char tag[AES_CBC_MAC_HASH_LEN];
645 	u_char iv[AES_BLOCK_LEN];
646 	union authctx ctx;
647 	struct swcr_auth *swa;
648 	struct auth_hash *axf;
649 	int error, ivlen;
650 
651 	swa = &ses->swcr_auth;
652 	axf = swa->sw_axf;
653 
654 	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
655 
656 	/* Initialize the IV */
657 	ivlen = AES_CCM_IV_LEN;
658 	crypto_read_iv(crp, iv);
659 
660 	/*
661 	 * AES CCM-CBC-MAC needs to know the length of both the auth
662 	 * data and payload data before doing the auth computation.
663 	 */
664 	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length;
665 	ctx.aes_cbc_mac_ctx.cryptDataLength = 0;
666 
667 	axf->Reinit(&ctx, iv, ivlen);
668 	if (crp->crp_aad != NULL)
669 		error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
670 	else
671 		error = crypto_apply(crp, crp->crp_payload_start,
672 		    crp->crp_payload_length, axf->Update, &ctx);
673 	if (error)
674 		return (error);
675 
676 	/* Finalize MAC */
677 	axf->Final(tag, &ctx);
678 
679 	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
680 		u_char tag2[AES_CBC_MAC_HASH_LEN];
681 
682 		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
683 		    tag2);
684 		if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
685 			error = EBADMSG;
686 		explicit_bzero(tag2, sizeof(tag2));
687 	} else {
688 		/* Inject the authentication data */
689 		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
690 	}
691 	explicit_bzero(tag, sizeof(tag));
692 	explicit_bzero(iv, sizeof(iv));
693 	return (error);
694 }
695 
696 static int
697 swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
698 {
699 	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
700 	u_char *blk = (u_char *)blkbuf;
701 	u_char tag[AES_CBC_MAC_HASH_LEN];
702 	u_char iv[AES_BLOCK_LEN];
703 	struct crypto_buffer_cursor cc_in, cc_out;
704 	const u_char *inblk;
705 	u_char *outblk;
706 	union authctx ctx;
707 	struct swcr_auth *swa;
708 	struct swcr_encdec *swe;
709 	struct auth_hash *axf;
710 	struct enc_xform *exf;
711 	size_t len;
712 	int blksz, error, ivlen, r, resid;
713 
714 	swa = &ses->swcr_auth;
715 	axf = swa->sw_axf;
716 
717 	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
718 	blksz = AES_BLOCK_LEN;
719 	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
720 	    __func__));
721 
722 	swe = &ses->swcr_encdec;
723 	exf = swe->sw_exf;
724 	KASSERT(axf->blocksize == exf->native_blocksize,
725 	    ("%s: blocksize mismatch", __func__));
726 
727 	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
728 		return (EINVAL);
729 
730 	/* Initialize the IV */
731 	ivlen = AES_CCM_IV_LEN;
732 	bcopy(crp->crp_iv, iv, ivlen);
733 
734 	/*
735 	 * AES CCM-CBC-MAC needs to know the length of both the auth
736 	 * data and payload data before doing the auth computation.
737 	 */
738 	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
739 	ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;
740 
741 	/* Supply MAC with IV */
742 	axf->Reinit(&ctx, iv, ivlen);
743 
744 	/* Supply MAC with AAD */
745 	if (crp->crp_aad != NULL)
746 		error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
747 	else
748 		error = crypto_apply(crp, crp->crp_aad_start,
749 		    crp->crp_aad_length, axf->Update, &ctx);
750 	if (error)
751 		return (error);
752 
753 	if (crp->crp_cipher_key != NULL)
754 		exf->setkey(swe->sw_kschedule, crp->crp_cipher_key,
755 		    crypto_get_params(crp->crp_session)->csp_cipher_klen);
756 	exf->reinit(swe->sw_kschedule, iv);
757 
758 	/* Do encryption/decryption with MAC */
759 	crypto_cursor_init(&cc_in, &crp->crp_buf);
760 	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
761 	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
762 		crypto_cursor_init(&cc_out, &crp->crp_obuf);
763 		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
764 	} else
765 		cc_out = cc_in;
766 	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
767 		inblk = crypto_cursor_segment(&cc_in, &len);
768 		if (len < blksz) {
769 			crypto_cursor_copydata(&cc_in, blksz, blk);
770 			inblk = blk;
771 		} else
772 			crypto_cursor_advance(&cc_in, blksz);
773 		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
774 			outblk = crypto_cursor_segment(&cc_out, &len);
775 			if (len < blksz)
776 				outblk = blk;
777 			axf->Update(&ctx, inblk, blksz);
778 			exf->encrypt(swe->sw_kschedule, inblk, outblk);
779 			if (outblk == blk)
780 				crypto_cursor_copyback(&cc_out, blksz, blk);
781 			else
782 				crypto_cursor_advance(&cc_out, blksz);
783 		} else {
784 			/*
785 			 * One of the problems with CCM+CBC is that
786 			 * the authentication is done on the
787 			 * unencrypted data.  As a result, we have to
788 			 * decrypt the data twice: once to generate
789 			 * the tag and a second time after the tag is
790 			 * verified.
791 			 */
792 			exf->decrypt(swe->sw_kschedule, inblk, blk);
793 			axf->Update(&ctx, blk, blksz);
794 		}
795 	}
796 	if (resid > 0) {
797 		crypto_cursor_copydata(&cc_in, resid, blk);
798 		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
799 			axf->Update(&ctx, blk, resid);
800 			exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
801 			crypto_cursor_copyback(&cc_out, resid, blk);
802 		} else {
803 			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
804 			axf->Update(&ctx, blk, resid);
805 		}
806 	}
807 
808 	/* Finalize MAC */
809 	axf->Final(tag, &ctx);
810 
811 	/* Validate tag */
812 	error = 0;
813 	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
814 		u_char tag2[AES_CBC_MAC_HASH_LEN];
815 
816 		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
817 		    tag2);
818 
819 		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
820 		explicit_bzero(tag2, sizeof(tag2));
821 		if (r != 0) {
822 			error = EBADMSG;
823 			goto out;
824 		}
825 
826 		/* tag matches, decrypt data */
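		/*
		 * Restart the counter-mode key stream at the beginning
		 * of the payload before the second, real decryption
		 * pass.
		 */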
827 		exf->reinit(swe->sw_kschedule, iv);
828 		crypto_cursor_init(&cc_in, &crp->crp_buf);
829 		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
830 		for (resid = crp->crp_payload_length; resid > blksz;
831 		     resid -= blksz) {
832 			inblk = crypto_cursor_segment(&cc_in, &len);
833 			if (len < blksz) {
834 				crypto_cursor_copydata(&cc_in, blksz, blk);
835 				inblk = blk;
836 			} else
837 				crypto_cursor_advance(&cc_in, blksz);
838 			outblk = crypto_cursor_segment(&cc_out, &len);
839 			if (len < blksz)
840 				outblk = blk;
841 			exf->decrypt(swe->sw_kschedule, inblk, outblk);
842 			if (outblk == blk)
843 				crypto_cursor_copyback(&cc_out, blksz, blk);
844 			else
845 				crypto_cursor_advance(&cc_out, blksz);
846 		}
847 		if (resid > 0) {
848 			crypto_cursor_copydata(&cc_in, resid, blk);
849 			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
850 			crypto_cursor_copyback(&cc_out, resid, blk);
851 		}
852 	} else {
853 		/* Inject the authentication data */
854 		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
855 	}
856 
857 out:
858 	explicit_bzero(blkbuf, sizeof(blkbuf));
859 	explicit_bzero(tag, sizeof(tag));
860 	explicit_bzero(iv, sizeof(iv));
861 	return (error);
862 }
863 
864 static int
865 swcr_chacha20_poly1305(struct swcr_session *ses, struct cryptop *crp)
866 {
867 	const struct crypto_session_params *csp;
868 	uint64_t blkbuf[howmany(CHACHA20_NATIVE_BLOCK_LEN, sizeof(uint64_t))];
869 	u_char *blk = (u_char *)blkbuf;
870 	u_char tag[POLY1305_HASH_LEN];
871 	struct crypto_buffer_cursor cc_in, cc_out;
872 	const u_char *inblk;
873 	u_char *outblk;
874 	uint64_t *blkp;
875 	union authctx ctx;
876 	struct swcr_auth *swa;
877 	struct swcr_encdec *swe;
878 	struct auth_hash *axf;
879 	struct enc_xform *exf;
880 	size_t len;
881 	int blksz, error, r, resid;
882 
883 	swa = &ses->swcr_auth;
884 	axf = swa->sw_axf;
885 
886 	swe = &ses->swcr_encdec;
887 	exf = swe->sw_exf;
888 	blksz = exf->native_blocksize;
889 	KASSERT(blksz <= sizeof(blkbuf), ("%s: blocksize mismatch", __func__));
890 
891 	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
892 		return (EINVAL);
893 
894 	csp = crypto_get_params(crp->crp_session);
895 
896 	/* Generate the one-time Poly1305 key from the ChaCha20 key and nonce. */
897 	if (crp->crp_cipher_key != NULL)
898 		axf->Setkey(&ctx, crp->crp_cipher_key, csp->csp_cipher_klen);
899 	else
900 		axf->Setkey(&ctx, csp->csp_cipher_key, csp->csp_cipher_klen);
901 	axf->Reinit(&ctx, crp->crp_iv, csp->csp_ivlen);
902 
903 	/* Supply MAC with AAD */
904 	if (crp->crp_aad != NULL)
905 		axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
906 	else
907 		crypto_apply(crp, crp->crp_aad_start,
908 		    crp->crp_aad_length, axf->Update, &ctx);
909 	if (crp->crp_aad_length % 16 != 0) {
910 		/* padding1: zero-pad the AAD to a 16-byte boundary (RFC 8439) */
911 		memset(blk, 0, 16);
912 		axf->Update(&ctx, blk, 16 - crp->crp_aad_length % 16);
913 	}
914 
915 	if (crp->crp_cipher_key != NULL)
916 		exf->setkey(swe->sw_kschedule, crp->crp_cipher_key,
917 		    csp->csp_cipher_klen);
918 	exf->reinit(swe->sw_kschedule, crp->crp_iv);
919 
920 	/* Do encryption with MAC */
921 	crypto_cursor_init(&cc_in, &crp->crp_buf);
922 	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
923 	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
924 		crypto_cursor_init(&cc_out, &crp->crp_obuf);
925 		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
926 	} else
927 		cc_out = cc_in;
928 	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
929 		inblk = crypto_cursor_segment(&cc_in, &len);
930 		if (len < blksz) {
931 			crypto_cursor_copydata(&cc_in, blksz, blk);
932 			inblk = blk;
933 		} else
934 			crypto_cursor_advance(&cc_in, blksz);
935 		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
936 			outblk = crypto_cursor_segment(&cc_out, &len);
937 			if (len < blksz)
938 				outblk = blk;
939 			exf->encrypt(swe->sw_kschedule, inblk, outblk);
940 			axf->Update(&ctx, outblk, blksz);
941 			if (outblk == blk)
942 				crypto_cursor_copyback(&cc_out, blksz, blk);
943 			else
944 				crypto_cursor_advance(&cc_out, blksz);
945 		} else {
946 			axf->Update(&ctx, inblk, blksz);
947 		}
948 	}
949 	if (resid > 0) {
950 		crypto_cursor_copydata(&cc_in, resid, blk);
951 		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
952 			exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
953 			crypto_cursor_copyback(&cc_out, resid, blk);
954 		}
955 		axf->Update(&ctx, blk, resid);
956 		if (resid % 16 != 0) {
957 			/* padding2: zero-pad the ciphertext to a 16-byte boundary (RFC 8439) */
958 			memset(blk, 0, 16);
959 			axf->Update(&ctx, blk, 16 - resid % 16);
960 		}
961 	}
962 
963 	/* Lengths: 64-bit little-endian byte counts of the AAD and ciphertext (RFC 8439). */
964 	blkp = (uint64_t *)blk;
965 	blkp[0] = htole64(crp->crp_aad_length);
966 	blkp[1] = htole64(crp->crp_payload_length);
967 	axf->Update(&ctx, blk, sizeof(uint64_t) * 2);
968 
969 	/* Finalize MAC */
970 	axf->Final(tag, &ctx);
971 
972 	/* Validate tag */
973 	error = 0;
974 	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
975 		u_char tag2[POLY1305_HASH_LEN];
976 
977 		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2);
978 
979 		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
980 		explicit_bzero(tag2, sizeof(tag2));
981 		if (r != 0) {
982 			error = EBADMSG;
983 			goto out;
984 		}
985 
986 		/* tag matches, decrypt data */
987 		crypto_cursor_init(&cc_in, &crp->crp_buf);
988 		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
989 		for (resid = crp->crp_payload_length; resid > blksz;
990 		     resid -= blksz) {
991 			inblk = crypto_cursor_segment(&cc_in, &len);
992 			if (len < blksz) {
993 				crypto_cursor_copydata(&cc_in, blksz, blk);
994 				inblk = blk;
995 			} else
996 				crypto_cursor_advance(&cc_in, blksz);
997 			outblk = crypto_cursor_segment(&cc_out, &len);
998 			if (len < blksz)
999 				outblk = blk;
1000 			exf->decrypt(swe->sw_kschedule, inblk, outblk);
1001 			if (outblk == blk)
1002 				crypto_cursor_copyback(&cc_out, blksz, blk);
1003 			else
1004 				crypto_cursor_advance(&cc_out, blksz);
1005 		}
1006 		if (resid > 0) {
1007 			crypto_cursor_copydata(&cc_in, resid, blk);
1008 			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
1009 			crypto_cursor_copyback(&cc_out, resid, blk);
1010 		}
1011 	} else {
1012 		/* Inject the authentication data */
1013 		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
1014 	}
1015 
1016 out:
1017 	explicit_bzero(blkbuf, sizeof(blkbuf));
1018 	explicit_bzero(tag, sizeof(tag));
1019 	explicit_bzero(&ctx, sizeof(ctx));
1020 	return (error);
1021 }
1022 
1023 /*
1024  * Apply a cipher and a digest to perform EtA.
1025  */
1026 static int
1027 swcr_eta(struct swcr_session *ses, struct cryptop *crp)
1028 {
1029 	int error;
1030 
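	/*
	 * Encrypt-then-authenticate: when encrypting, run the cipher
	 * first so the digest covers the ciphertext; when decrypting,
	 * verify the digest before decrypting.
	 */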
1031 	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
1032 		error = swcr_encdec(ses, crp);
1033 		if (error == 0)
1034 			error = swcr_authcompute(ses, crp);
1035 	} else {
1036 		error = swcr_authcompute(ses, crp);
1037 		if (error == 0)
1038 			error = swcr_encdec(ses, crp);
1039 	}
1040 	return (error);
1041 }
1042 
1043 /*
1044  * Apply a compression/decompression algorithm
1045  */
1046 static int
1047 swcr_compdec(struct swcr_session *ses, struct cryptop *crp)
1048 {
1049 	uint8_t *data, *out;
1050 	struct comp_algo *cxf;
1051 	int adj;
1052 	uint32_t result;
1053 
1054 	cxf = ses->swcr_compdec.sw_cxf;
1055 
1056 	/* We must process the whole buffer of data in one pass, so if
1057 	 * the data is not contiguous in the source buffer, copy it
1058 	 * into a contiguous buffer first.
1059 	 */
1060 
1061 	data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
1062 	if (data == NULL)
1063 		return (EINVAL);
1064 	crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
1065 	    data);
1066 
1067 	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
1068 		result = cxf->compress(data, crp->crp_payload_length, &out);
1069 	else
1070 		result = cxf->decompress(data, crp->crp_payload_length, &out);
1071 
1072 	free(data, M_CRYPTO_DATA);
1073 	if (result == 0)
1074 		return (EINVAL);
1075 	crp->crp_olen = result;
1076 
1077 	/* Check the compressed size when doing compression */
1078 	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
1079 		if (result >= crp->crp_payload_length) {
1080 			/* Compression was useless, we lost time */
1081 			free(out, M_CRYPTO_DATA);
1082 			return (0);
1083 		}
1084 	}
1085 
1086 	/* Copy back the (de)compressed data.  m_copyback will extend
1087 	 * the mbuf as necessary.
1088 	 */
1089 	crypto_copyback(crp, crp->crp_payload_start, result, out);
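	/*
	 * If the result is shorter than the original payload, trim the
	 * tail of the destination buffer to the new length; mbuf
	 * chains, uio vectors and VM page lists each need their own
	 * adjustment.
	 */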
1090 	if (result < crp->crp_payload_length) {
1091 		switch (crp->crp_buf.cb_type) {
1092 		case CRYPTO_BUF_MBUF:
1093 		case CRYPTO_BUF_SINGLE_MBUF:
1094 			adj = result - crp->crp_payload_length;
1095 			m_adj(crp->crp_buf.cb_mbuf, adj);
1096 			break;
1097 		case CRYPTO_BUF_UIO: {
1098 			struct uio *uio = crp->crp_buf.cb_uio;
1099 			int ind;
1100 
1101 			adj = crp->crp_payload_length - result;
1102 			ind = uio->uio_iovcnt - 1;
1103 
1104 			while (adj > 0 && ind >= 0) {
1105 				if (adj < uio->uio_iov[ind].iov_len) {
1106 					uio->uio_iov[ind].iov_len -= adj;
1107 					break;
1108 				}
1109 
1110 				adj -= uio->uio_iov[ind].iov_len;
1111 				uio->uio_iov[ind].iov_len = 0;
1112 				ind--;
1113 				uio->uio_iovcnt--;
1114 			}
1115 			}
1116 			break;
1117 		case CRYPTO_BUF_VMPAGE:
1118 			adj = crp->crp_payload_length - result;
1119 			crp->crp_buf.cb_vm_page_len -= adj;
1120 			break;
1121 		default:
1122 			break;
1123 		}
1124 	}
1125 	free(out, M_CRYPTO_DATA);
1126 	return (0);
1127 }
1128 
1129 static int
1130 swcr_setup_cipher(struct swcr_session *ses,
1131     const struct crypto_session_params *csp)
1132 {
1133 	struct swcr_encdec *swe;
1134 	struct enc_xform *txf;
1135 	int error;
1136 
1137 	swe = &ses->swcr_encdec;
1138 	txf = crypto_cipher(csp);
1139 	MPASS(txf->ivsize == csp->csp_ivlen);
1140 	if (txf->ctxsize != 0) {
1141 		swe->sw_kschedule = malloc(txf->ctxsize, M_CRYPTO_DATA,
1142 		    M_NOWAIT);
1143 		if (swe->sw_kschedule == NULL)
1144 			return (ENOMEM);
1145 	}
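	/*
	 * The key may also be supplied per request via crp_cipher_key,
	 * so a session created without a key is still valid here.
	 */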
1146 	if (csp->csp_cipher_key != NULL) {
1147 		error = txf->setkey(swe->sw_kschedule,
1148 		    csp->csp_cipher_key, csp->csp_cipher_klen);
1149 		if (error)
1150 			return (error);
1151 	}
1152 	swe->sw_exf = txf;
1153 	return (0);
1154 }
1155 
1156 static int
1157 swcr_setup_auth(struct swcr_session *ses,
1158     const struct crypto_session_params *csp)
1159 {
1160 	struct swcr_auth *swa;
1161 	struct auth_hash *axf;
1162 
1163 	swa = &ses->swcr_auth;
1164 
1165 	axf = crypto_auth_hash(csp);
1166 	swa->sw_axf = axf;
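	/*
	 * sw_mlen is the (possibly truncated) digest length copied or
	 * compared for each request.
	 */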
1167 	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
1168 		return (EINVAL);
1169 	if (csp->csp_auth_mlen == 0)
1170 		swa->sw_mlen = axf->hashsize;
1171 	else
1172 		swa->sw_mlen = csp->csp_auth_mlen;
1173 	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
1174 	if (swa->sw_ictx == NULL)
1175 		return (ENOBUFS);
1176 
1177 	switch (csp->csp_auth_alg) {
1178 	case CRYPTO_SHA1_HMAC:
1179 	case CRYPTO_SHA2_224_HMAC:
1180 	case CRYPTO_SHA2_256_HMAC:
1181 	case CRYPTO_SHA2_384_HMAC:
1182 	case CRYPTO_SHA2_512_HMAC:
1183 	case CRYPTO_NULL_HMAC:
1184 	case CRYPTO_RIPEMD160_HMAC:
1185 		swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
1186 		    M_NOWAIT);
1187 		if (swa->sw_octx == NULL)
1188 			return (ENOBUFS);
1189 
1190 		if (csp->csp_auth_key != NULL) {
1191 			swcr_authprepare(axf, swa, csp->csp_auth_key,
1192 			    csp->csp_auth_klen);
1193 		}
1194 
1195 		if (csp->csp_mode == CSP_MODE_DIGEST)
1196 			ses->swcr_process = swcr_authcompute;
1197 		break;
1198 	case CRYPTO_SHA1:
1199 	case CRYPTO_SHA2_224:
1200 	case CRYPTO_SHA2_256:
1201 	case CRYPTO_SHA2_384:
1202 	case CRYPTO_SHA2_512:
1203 		axf->Init(swa->sw_ictx);
1204 		if (csp->csp_mode == CSP_MODE_DIGEST)
1205 			ses->swcr_process = swcr_authcompute;
1206 		break;
1207 	case CRYPTO_AES_NIST_GMAC:
1208 		axf->Init(swa->sw_ictx);
1209 		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
1210 		    csp->csp_auth_klen);
1211 		if (csp->csp_mode == CSP_MODE_DIGEST)
1212 			ses->swcr_process = swcr_gmac;
1213 		break;
1214 	case CRYPTO_POLY1305:
1215 	case CRYPTO_BLAKE2B:
1216 	case CRYPTO_BLAKE2S:
1217 		/*
1218 		 * Blake2b and Blake2s support an optional key but do
1219 		 * not require one.
1220 		 */
1221 		if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL)
1222 			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
1223 			    csp->csp_auth_klen);
1224 		axf->Init(swa->sw_ictx);
1225 		if (csp->csp_mode == CSP_MODE_DIGEST)
1226 			ses->swcr_process = swcr_authcompute;
1227 		break;
1228 	case CRYPTO_AES_CCM_CBC_MAC:
1229 		axf->Init(swa->sw_ictx);
1230 		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
1231 		    csp->csp_auth_klen);
1232 		if (csp->csp_mode == CSP_MODE_DIGEST)
1233 			ses->swcr_process = swcr_ccm_cbc_mac;
1234 		break;
1235 	}
1236 
1237 	return (0);
1238 }
1239 
1240 static int
1241 swcr_setup_gcm(struct swcr_session *ses,
1242     const struct crypto_session_params *csp)
1243 {
1244 	struct swcr_auth *swa;
1245 	struct auth_hash *axf;
1246 
1247 	if (csp->csp_ivlen != AES_GCM_IV_LEN)
1248 		return (EINVAL);
1249 
1250 	/* First, setup the auth side. */
1251 	swa = &ses->swcr_auth;
1252 	switch (csp->csp_cipher_klen * 8) {
1253 	case 128:
1254 		axf = &auth_hash_nist_gmac_aes_128;
1255 		break;
1256 	case 192:
1257 		axf = &auth_hash_nist_gmac_aes_192;
1258 		break;
1259 	case 256:
1260 		axf = &auth_hash_nist_gmac_aes_256;
1261 		break;
1262 	default:
1263 		return (EINVAL);
1264 	}
1265 	swa->sw_axf = axf;
1266 	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
1267 		return (EINVAL);
1268 	if (csp->csp_auth_mlen == 0)
1269 		swa->sw_mlen = axf->hashsize;
1270 	else
1271 		swa->sw_mlen = csp->csp_auth_mlen;
1272 	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
1273 	if (swa->sw_ictx == NULL)
1274 		return (ENOBUFS);
1275 	axf->Init(swa->sw_ictx);
1276 	if (csp->csp_cipher_key != NULL)
1277 		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
1278 		    csp->csp_cipher_klen);
1279 
1280 	/* Second, setup the cipher side. */
1281 	return (swcr_setup_cipher(ses, csp));
1282 }
1283 
1284 static int
1285 swcr_setup_ccm(struct swcr_session *ses,
1286     const struct crypto_session_params *csp)
1287 {
1288 	struct swcr_auth *swa;
1289 	struct auth_hash *axf;
1290 
1291 	if (csp->csp_ivlen != AES_CCM_IV_LEN)
1292 		return (EINVAL);
1293 
1294 	/* First, setup the auth side. */
1295 	swa = &ses->swcr_auth;
1296 	switch (csp->csp_cipher_klen * 8) {
1297 	case 128:
1298 		axf = &auth_hash_ccm_cbc_mac_128;
1299 		break;
1300 	case 192:
1301 		axf = &auth_hash_ccm_cbc_mac_192;
1302 		break;
1303 	case 256:
1304 		axf = &auth_hash_ccm_cbc_mac_256;
1305 		break;
1306 	default:
1307 		return (EINVAL);
1308 	}
1309 	swa->sw_axf = axf;
1310 	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
1311 		return (EINVAL);
1312 	if (csp->csp_auth_mlen == 0)
1313 		swa->sw_mlen = axf->hashsize;
1314 	else
1315 		swa->sw_mlen = csp->csp_auth_mlen;
1316 	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
1317 	if (swa->sw_ictx == NULL)
1318 		return (ENOBUFS);
1319 	axf->Init(swa->sw_ictx);
1320 	if (csp->csp_cipher_key != NULL)
1321 		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
1322 		    csp->csp_cipher_klen);
1323 
1324 	/* Second, setup the cipher side. */
1325 	return (swcr_setup_cipher(ses, csp));
1326 }
1327 
1328 static int
1329 swcr_setup_chacha20_poly1305(struct swcr_session *ses,
1330     const struct crypto_session_params *csp)
1331 {
1332 	struct swcr_auth *swa;
1333 	struct auth_hash *axf;
1334 
1335 	if (csp->csp_ivlen != CHACHA20_POLY1305_IV_LEN)
1336 		return (EINVAL);
1337 
1338 	/* First, setup the auth side. */
1339 	swa = &ses->swcr_auth;
1340 	axf = &auth_hash_chacha20_poly1305;
1341 	swa->sw_axf = axf;
1342 	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
1343 		return (EINVAL);
1344 	if (csp->csp_auth_mlen == 0)
1345 		swa->sw_mlen = axf->hashsize;
1346 	else
1347 		swa->sw_mlen = csp->csp_auth_mlen;
1348 
1349 	/* The auth state is regenerated for each nonce. */
1350 
1351 	/* Second, setup the cipher side. */
1352 	return (swcr_setup_cipher(ses, csp));
1353 }
1354 
1355 static bool
1356 swcr_auth_supported(const struct crypto_session_params *csp)
1357 {
1358 	struct auth_hash *axf;
1359 
1360 	axf = crypto_auth_hash(csp);
1361 	if (axf == NULL)
1362 		return (false);
1363 	switch (csp->csp_auth_alg) {
1364 	case CRYPTO_SHA1_HMAC:
1365 	case CRYPTO_SHA2_224_HMAC:
1366 	case CRYPTO_SHA2_256_HMAC:
1367 	case CRYPTO_SHA2_384_HMAC:
1368 	case CRYPTO_SHA2_512_HMAC:
1369 	case CRYPTO_NULL_HMAC:
1370 	case CRYPTO_RIPEMD160_HMAC:
1371 		break;
1372 	case CRYPTO_AES_NIST_GMAC:
1373 		switch (csp->csp_auth_klen * 8) {
1374 		case 128:
1375 		case 192:
1376 		case 256:
1377 			break;
1378 		default:
1379 			return (false);
1380 		}
1381 		if (csp->csp_auth_key == NULL)
1382 			return (false);
1383 		if (csp->csp_ivlen != AES_GCM_IV_LEN)
1384 			return (false);
1385 		break;
1386 	case CRYPTO_POLY1305:
1387 		if (csp->csp_auth_klen != POLY1305_KEY_LEN)
1388 			return (false);
1389 		break;
1390 	case CRYPTO_AES_CCM_CBC_MAC:
1391 		switch (csp->csp_auth_klen * 8) {
1392 		case 128:
1393 		case 192:
1394 		case 256:
1395 			break;
1396 		default:
1397 			return (false);
1398 		}
1399 		if (csp->csp_auth_key == NULL)
1400 			return (false);
1401 		if (csp->csp_ivlen != AES_CCM_IV_LEN)
1402 			return (false);
1403 		break;
1404 	}
1405 	return (true);
1406 }
1407 
1408 static bool
1409 swcr_cipher_supported(const struct crypto_session_params *csp)
1410 {
1411 	struct enc_xform *txf;
1412 
1413 	txf = crypto_cipher(csp);
1414 	if (txf == NULL)
1415 		return (false);
1416 	if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
1417 	    txf->ivsize != csp->csp_ivlen)
1418 		return (false);
1419 	return (true);
1420 }
1421 
1422 #define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)
1423 
1424 static int
1425 swcr_probesession(device_t dev, const struct crypto_session_params *csp)
1426 {
1427 	if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
1428 		return (EINVAL);
1429 	switch (csp->csp_mode) {
1430 	case CSP_MODE_COMPRESS:
1431 		switch (csp->csp_cipher_alg) {
1432 		case CRYPTO_DEFLATE_COMP:
1433 			break;
1434 		default:
1435 			return (EINVAL);
1436 		}
1437 		break;
1438 	case CSP_MODE_CIPHER:
1439 		switch (csp->csp_cipher_alg) {
1440 		case CRYPTO_AES_NIST_GCM_16:
1441 		case CRYPTO_AES_CCM_16:
1442 		case CRYPTO_CHACHA20_POLY1305:
1443 			return (EINVAL);
1444 		default:
1445 			if (!swcr_cipher_supported(csp))
1446 				return (EINVAL);
1447 			break;
1448 		}
1449 		break;
1450 	case CSP_MODE_DIGEST:
1451 		if (!swcr_auth_supported(csp))
1452 			return (EINVAL);
1453 		break;
1454 	case CSP_MODE_AEAD:
1455 		switch (csp->csp_cipher_alg) {
1456 		case CRYPTO_AES_NIST_GCM_16:
1457 		case CRYPTO_AES_CCM_16:
1458 		case CRYPTO_CHACHA20_POLY1305:
1459 			break;
1460 		default:
1461 			return (EINVAL);
1462 		}
1463 		break;
1464 	case CSP_MODE_ETA:
1465 		/* AEAD algorithms cannot be used for EtA. */
1466 		switch (csp->csp_cipher_alg) {
1467 		case CRYPTO_AES_NIST_GCM_16:
1468 		case CRYPTO_AES_CCM_16:
1469 		case CRYPTO_CHACHA20_POLY1305:
1470 			return (EINVAL);
1471 		}
1472 		switch (csp->csp_auth_alg) {
1473 		case CRYPTO_AES_NIST_GMAC:
1474 		case CRYPTO_AES_CCM_CBC_MAC:
1475 			return (EINVAL);
1476 		}
1477 
1478 		if (!swcr_cipher_supported(csp) ||
1479 		    !swcr_auth_supported(csp))
1480 			return (EINVAL);
1481 		break;
1482 	default:
1483 		return (EINVAL);
1484 	}
1485 
1486 	return (CRYPTODEV_PROBE_SOFTWARE);
1487 }
1488 
1489 /*
1490  * Generate a new software session.
1491  */
1492 static int
1493 swcr_newsession(device_t dev, crypto_session_t cses,
1494     const struct crypto_session_params *csp)
1495 {
1496 	struct swcr_session *ses;
1497 	struct swcr_encdec *swe;
1498 	struct swcr_auth *swa;
1499 	struct comp_algo *cxf;
1500 	int error;
1501 
1502 	ses = crypto_get_driver_session(cses);
1503 	mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);
1504 
1505 	error = 0;
1506 	swe = &ses->swcr_encdec;
1507 	swa = &ses->swcr_auth;
1508 	switch (csp->csp_mode) {
1509 	case CSP_MODE_COMPRESS:
1510 		switch (csp->csp_cipher_alg) {
1511 		case CRYPTO_DEFLATE_COMP:
1512 			cxf = &comp_algo_deflate;
1513 			break;
1514 #ifdef INVARIANTS
1515 		default:
1516 			panic("bad compression algo");
1517 #endif
1518 		}
1519 		ses->swcr_compdec.sw_cxf = cxf;
1520 		ses->swcr_process = swcr_compdec;
1521 		break;
1522 	case CSP_MODE_CIPHER:
1523 		switch (csp->csp_cipher_alg) {
1524 		case CRYPTO_NULL_CBC:
1525 			ses->swcr_process = swcr_null;
1526 			break;
1527 #ifdef INVARIANTS
1528 		case CRYPTO_AES_NIST_GCM_16:
1529 		case CRYPTO_AES_CCM_16:
1530 		case CRYPTO_CHACHA20_POLY1305:
1531 			panic("bad cipher algo");
1532 #endif
1533 		default:
1534 			error = swcr_setup_cipher(ses, csp);
1535 			if (error == 0)
1536 				ses->swcr_process = swcr_encdec;
1537 		}
1538 		break;
1539 	case CSP_MODE_DIGEST:
1540 		error = swcr_setup_auth(ses, csp);
1541 		break;
1542 	case CSP_MODE_AEAD:
1543 		switch (csp->csp_cipher_alg) {
1544 		case CRYPTO_AES_NIST_GCM_16:
1545 			error = swcr_setup_gcm(ses, csp);
1546 			if (error == 0)
1547 				ses->swcr_process = swcr_gcm;
1548 			break;
1549 		case CRYPTO_AES_CCM_16:
1550 			error = swcr_setup_ccm(ses, csp);
1551 			if (error == 0)
1552 				ses->swcr_process = swcr_ccm;
1553 			break;
1554 		case CRYPTO_CHACHA20_POLY1305:
1555 			error = swcr_setup_chacha20_poly1305(ses, csp);
1556 			if (error == 0)
1557 				ses->swcr_process = swcr_chacha20_poly1305;
1558 			break;
1559 #ifdef INVARIANTS
1560 		default:
1561 			panic("bad aead algo");
1562 #endif
1563 		}
1564 		break;
1565 	case CSP_MODE_ETA:
1566 #ifdef INVARIANTS
1567 		switch (csp->csp_cipher_alg) {
1568 		case CRYPTO_AES_NIST_GCM_16:
1569 		case CRYPTO_AES_CCM_16:
1570 		case CRYPTO_CHACHA20_POLY1305:
1571 			panic("bad eta cipher algo");
1572 		}
1573 		switch (csp->csp_auth_alg) {
1574 		case CRYPTO_AES_NIST_GMAC:
1575 		case CRYPTO_AES_CCM_CBC_MAC:
1576 			panic("bad eta auth algo");
1577 		}
1578 #endif
1579 
1580 		error = swcr_setup_auth(ses, csp);
1581 		if (error)
1582 			break;
1583 		if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
1584 			/* Effectively degrade to digest mode. */
1585 			ses->swcr_process = swcr_authcompute;
1586 			break;
1587 		}
1588 
1589 		error = swcr_setup_cipher(ses, csp);
1590 		if (error == 0)
1591 			ses->swcr_process = swcr_eta;
1592 		break;
1593 	default:
1594 		error = EINVAL;
1595 	}
1596 
1597 	if (error)
1598 		swcr_freesession(dev, cses);
1599 	return (error);
1600 }
1601 
1602 static void
1603 swcr_freesession(device_t dev, crypto_session_t cses)
1604 {
1605 	struct swcr_session *ses;
1606 
1607 	ses = crypto_get_driver_session(cses);
1608 
1609 	mtx_destroy(&ses->swcr_lock);
1610 
1611 	zfree(ses->swcr_encdec.sw_kschedule, M_CRYPTO_DATA);
1612 	zfree(ses->swcr_auth.sw_ictx, M_CRYPTO_DATA);
1613 	zfree(ses->swcr_auth.sw_octx, M_CRYPTO_DATA);
1614 }
1615 
1616 /*
1617  * Process a software request.
1618  */
1619 static int
1620 swcr_process(device_t dev, struct cryptop *crp, int hint)
1621 {
1622 	struct swcr_session *ses;
1623 
1624 	ses = crypto_get_driver_session(crp->crp_session);
1625 	mtx_lock(&ses->swcr_lock);
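	/*
	 * Requests on a session are serialized; the handler chosen at
	 * session setup reports its result through crp_etype before the
	 * request is completed with crypto_done().
	 */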
1626 
1627 	crp->crp_etype = ses->swcr_process(ses, crp);
1628 
1629 	mtx_unlock(&ses->swcr_lock);
1630 	crypto_done(crp);
1631 	return (0);
1632 }
1633 
1634 static void
1635 swcr_identify(driver_t *drv, device_t parent)
1636 {
1637 	/* NB: order 10 is so we get attached after h/w devices */
1638 	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
1639 	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
1640 		panic("cryptosoft: could not attach");
1641 }
1642 
1643 static int
1644 swcr_probe(device_t dev)
1645 {
1646 	device_set_desc(dev, "software crypto");
1647 	device_quiet(dev);
1648 	return (BUS_PROBE_NOWILDCARD);
1649 }
1650 
1651 static int
1652 swcr_attach(device_t dev)
1653 {
1654 
1655 	swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
1656 			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
1657 	if (swcr_id < 0) {
1658 		device_printf(dev, "cannot initialize!\n");
1659 		return (ENXIO);
1660 	}
1661 
1662 	return (0);
1663 }
1664 
1665 static int
1666 swcr_detach(device_t dev)
1667 {
1668 	crypto_unregister_all(swcr_id);
1669 	return (0);
1670 }
1671 
1672 static device_method_t swcr_methods[] = {
1673 	DEVMETHOD(device_identify,	swcr_identify),
1674 	DEVMETHOD(device_probe,		swcr_probe),
1675 	DEVMETHOD(device_attach,	swcr_attach),
1676 	DEVMETHOD(device_detach,	swcr_detach),
1677 
1678 	DEVMETHOD(cryptodev_probesession, swcr_probesession),
1679 	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
1680 	DEVMETHOD(cryptodev_freesession,swcr_freesession),
1681 	DEVMETHOD(cryptodev_process,	swcr_process),
1682 
1683 	{0, 0},
1684 };
1685 
1686 static driver_t swcr_driver = {
1687 	"cryptosoft",
1688 	swcr_methods,
1689 	0,		/* NB: no softc */
1690 };
1691 static devclass_t swcr_devclass;
1692 
1693 /*
1694  * NB: We explicitly reference the crypto module so we
1695  * get the necessary ordering when built as a loadable
1696  * module.  This is required because we bundle the crypto
1697  * module code together with the cryptosoft driver (otherwise
1698  * normal module dependencies would handle things).
1699  */
1700 extern int crypto_modevent(struct module *, int, void *);
1701 /* XXX where to attach */
1702 DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
1703 MODULE_VERSION(cryptosoft, 1);
1704 MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
1705