/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014-2021 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Portions of this software were developed by Ararat River
 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/endian.h>
#include <sys/limits.h>

#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

struct swcr_auth {
	void		*sw_ictx;
	void		*sw_octx;
	const struct auth_hash *sw_axf;
	uint16_t	sw_mlen;
	bool		sw_hmac;
};

struct swcr_encdec {
	void		*sw_ctx;
	const struct enc_xform *sw_exf;
};

struct swcr_compdec {
	const struct comp_algo *sw_cxf;
};

struct swcr_session {
	int	(*swcr_process)(const struct swcr_session *, struct cryptop *);

	struct swcr_auth swcr_auth;
	struct swcr_encdec swcr_encdec;
	struct swcr_compdec swcr_compdec;
};
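
/*
 * Note on dispatch: swcr_process is chosen once at session setup
 * (swcr_setup_*() below), so per-request processing is a single
 * indirect call instead of a per-request switch on the algorithm.
 */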

static	int32_t swcr_id;

static	void swcr_freesession(device_t dev, crypto_session_t cses);

/* Used for CRYPTO_NULL_CBC. */
static int
swcr_null(const struct swcr_session *ses, struct cryptop *crp)
{

	return (0);
}

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(const struct swcr_session *ses, struct cryptop *crp)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	const struct crypto_session_params *csp;
	const struct enc_xform *exf;
	const struct swcr_encdec *sw;
	void *ctx;
	size_t inlen, outlen;
	int i, blks, resid;
	struct crypto_buffer_cursor cc_in, cc_out;
	const unsigned char *inblk;
	unsigned char *outblk;
	int error;
	bool encrypting;

	error = 0;

	sw = &ses->swcr_encdec;
	exf = sw->sw_exf;
	csp = crypto_get_params(crp->crp_session);

	if (exf->native_blocksize == 0) {
		/* Check for non-padded data */
		if ((crp->crp_payload_length % exf->blocksize) != 0)
			return (EINVAL);

		blks = exf->blocksize;
	} else
		blks = exf->native_blocksize;

	if (exf == &enc_xform_aes_icm &&
	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL) {
		error = exf->setkey(ctx, crp->crp_cipher_key,
		    csp->csp_cipher_klen);
		if (error)
			return (error);
	} else
		memcpy(ctx, sw->sw_ctx, exf->ctxsize);

	crypto_read_iv(crp, iv);

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(ctx, iv, csp->csp_ivlen);
	}

	ivp = iv;

	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	inblk = crypto_cursor_segment(&cc_in, &inlen);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	outblk = crypto_cursor_segment(&cc_out, &outlen);

	resid = crp->crp_payload_length;
	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);

	/*
	 * Loop through the blocks, encrypting or decrypting.  'inlen'
	 * is the remaining length of the current segment in the input
	 * buffer.  'outlen' is the remaining length of the current
	 * segment in the output buffer.
	 */
	while (resid >= blks) {
		/*
		 * If the current block is not contained within the
		 * current input/output segment, use 'blk' as a local
		 * buffer.
		 */
		if (inlen < blks) {
			crypto_cursor_copydata(&cc_in, blks, blk);
			inblk = blk;
		}
		if (outlen < blks)
			outblk = blk;

		/*
		 * Ciphers without a 'reinit' hook are assumed to be
		 * used in CBC mode where the chaining is done here.
		 */
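		/*
		 * That is, C[i] = E_K(P[i] ^ C[i-1]) on encryption and
		 * P[i] = D_K(C[i]) ^ C[i-1] on decryption, with the IV
		 * acting as C[0].
		 */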
		if (exf->reinit != NULL) {
			if (encrypting)
				exf->encrypt(ctx, inblk, outblk);
			else
				exf->decrypt(ctx, inblk, outblk);
		} else if (encrypting) {
			/* XOR with previous block */
			for (i = 0; i < blks; i++)
				outblk[i] = inblk[i] ^ ivp[i];

			exf->encrypt(ctx, outblk, outblk);

			/*
			 * Keep encrypted block for XOR'ing
			 * with next block
			 */
			memcpy(iv, outblk, blks);
			ivp = iv;
		} else {	/* decrypt */
			/*
			 * Keep encrypted block for XOR'ing
			 * with next block
			 */
			nivp = (ivp == iv) ? iv2 : iv;
			memcpy(nivp, inblk, blks);

			exf->decrypt(ctx, inblk, outblk);

			/* XOR with previous block */
			for (i = 0; i < blks; i++)
				outblk[i] ^= ivp[i];

			ivp = nivp;
		}

		if (inlen < blks) {
			inblk = crypto_cursor_segment(&cc_in, &inlen);
		} else {
			crypto_cursor_advance(&cc_in, blks);
			inlen -= blks;
			inblk += blks;
		}

		if (outlen < blks) {
			crypto_cursor_copyback(&cc_out, blks, blk);
			outblk = crypto_cursor_segment(&cc_out, &outlen);
		} else {
			crypto_cursor_advance(&cc_out, blks);
			outlen -= blks;
			outblk += blks;
		}

		resid -= blks;
	}

	/* Handle trailing partial block for stream ciphers. */
	if (resid > 0) {
		KASSERT(exf->native_blocksize != 0,
		    ("%s: partial block of %d bytes for cipher %s",
		    __func__, resid, exf->name));
		KASSERT(exf->reinit != NULL,
		    ("%s: partial block cipher %s without reinit hook",
		    __func__, exf->name));
		KASSERT(resid < blks, ("%s: partial block too big", __func__));

		inblk = crypto_cursor_segment(&cc_in, &inlen);
		outblk = crypto_cursor_segment(&cc_out, &outlen);
		if (inlen < resid) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			inblk = blk;
		}
		if (outlen < resid)
			outblk = blk;
		if (encrypting)
			exf->encrypt_last(ctx, inblk, outblk,
			    resid);
		else
			exf->decrypt_last(ctx, inblk, outblk,
			    resid);
		if (outlen < resid)
			crypto_cursor_copyback(&cc_out, resid, blk);
	}

	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(blk, sizeof(blk));
	explicit_bzero(iv, sizeof(iv));
	explicit_bzero(iv2, sizeof(iv2));
	return (0);
}

/*
 * Compute or verify hash.
 */
static int
swcr_authcompute(const struct swcr_session *ses, struct cryptop *crp)
{
	u_char aalg[HASH_MAX_LEN];
	const struct crypto_session_params *csp;
	const struct swcr_auth *sw;
	const struct auth_hash *axf;
	union authctx ctx;
	int err;

	sw = &ses->swcr_auth;

	axf = sw->sw_axf;

	csp = crypto_get_params(crp->crp_session);
	if (crp->crp_auth_key != NULL) {
		if (sw->sw_hmac) {
			hmac_init_ipad(axf, crp->crp_auth_key,
			    csp->csp_auth_klen, &ctx);
		} else {
			axf->Init(&ctx);
			axf->Setkey(&ctx, crp->crp_auth_key,
			    csp->csp_auth_klen);
		}
	} else
		memcpy(&ctx, sw->sw_ictx, axf->ctxsize);

	if (crp->crp_aad != NULL)
		err = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
	else
		err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
		    axf->Update, &ctx);
	if (err)
		goto out;

	if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
	    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		err = crypto_apply_buf(&crp->crp_obuf,
		    crp->crp_payload_output_start, crp->crp_payload_length,
		    axf->Update, &ctx);
	else
		err = crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length, axf->Update, &ctx);
	if (err)
		goto out;

	if (csp->csp_flags & CSP_F_ESN)
		axf->Update(&ctx, crp->crp_esn, 4);

	axf->Final(aalg, &ctx);
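	/*
	 * For HMAC, aalg now holds the inner digest; finish with the
	 * outer hash: H((K ^ opad) || H((K ^ ipad) || message)).
	 */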
	if (sw->sw_hmac) {
		if (crp->crp_auth_key != NULL)
			hmac_init_opad(axf, crp->crp_auth_key,
			    csp->csp_auth_klen, &ctx);
		else
			memcpy(&ctx, sw->sw_octx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char uaalg[HASH_MAX_LEN];

		crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
		if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
			err = EBADMSG;
		explicit_bzero(uaalg, sizeof(uaalg));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
	}
	explicit_bzero(aalg, sizeof(aalg));
out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (err);
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */

static int
swcr_gmac(const struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[GMAC_DIGEST_LEN];
	u_char iv[AES_BLOCK_LEN];
	struct crypto_buffer_cursor cc;
	const u_char *inblk;
	union authctx ctx;
	const struct swcr_auth *swa;
	const struct auth_hash *axf;
	uint32_t *blkp;
	size_t len;
	int blksz, error, ivlen, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;
	blksz = GMAC_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	if (crp->crp_auth_key != NULL) {
		axf->Init(&ctx);
		axf->Setkey(&ctx, crp->crp_auth_key,
		    crypto_get_params(crp->crp_session)->csp_auth_klen);
	} else
		memcpy(&ctx, swa->sw_ictx, axf->ctxsize);

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	crypto_read_iv(crp, iv);

	axf->Reinit(&ctx, iv, ivlen);
	crypto_cursor_init(&cc, &crp->crp_buf);
	crypto_cursor_advance(&cc, crp->crp_payload_start);
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= len) {
		inblk = crypto_cursor_segment(&cc, &len);
		if (len >= blksz) {
			len = rounddown(MIN(len, resid), blksz);
			crypto_cursor_advance(&cc, len);
		} else {
			len = blksz;
			crypto_cursor_copydata(&cc, len, blk);
			inblk = blk;
		}
		axf->Update(&ctx, inblk, len);
	}
	if (resid > 0) {
		memset(blk, 0, blksz);
		crypto_cursor_copydata(&cc, resid, blk);
		axf->Update(&ctx, blk, blksz);
	}

	/* length block */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);
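	/*
	 * GCM's closing block is be64(aadbits) || be64(ctbits); for
	 * GMAC the payload is all AAD, so the store above fills bytes
	 * 4-7 (the low half of the first length) and the ciphertext
	 * length stays zero.
	 */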

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	error = 0;
	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char tag2[GMAC_DIGEST_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);
		if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
		explicit_bzero(tag2, sizeof(tag2));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}

static int
swcr_gcm(const struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[GMAC_DIGEST_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	const struct swcr_auth *swa;
	const struct swcr_encdec *swe;
	const struct enc_xform *exf;
	void *ctx;
	uint32_t *blkp;
	size_t len;
	int blksz, error, ivlen, r, resid;

	swa = &ses->swcr_auth;
	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	blksz = GMAC_BLOCK_LEN;
	KASSERT(blksz == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	ivlen = AES_GCM_IV_LEN;

	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL)
		exf->setkey(ctx, crp->crp_cipher_key,
		    crypto_get_params(crp->crp_session)->csp_cipher_klen);
	else
		memcpy(ctx, swe->sw_ctx, exf->ctxsize);
	exf->reinit(ctx, crp->crp_iv, ivlen);

	/* Supply MAC with AAD */
	if (crp->crp_aad != NULL) {
		len = rounddown(crp->crp_aad_length, blksz);
		if (len != 0)
			exf->update(ctx, crp->crp_aad, len);
		if (crp->crp_aad_length != len) {
			memset(blk, 0, blksz);
			memcpy(blk, (char *)crp->crp_aad + len,
			    crp->crp_aad_length - len);
			exf->update(ctx, blk, blksz);
		}
	} else {
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_aad_start);
		for (resid = crp->crp_aad_length; resid >= blksz;
		     resid -= len) {
			inblk = crypto_cursor_segment(&cc_in, &len);
			if (len >= blksz) {
				len = rounddown(MIN(len, resid), blksz);
				crypto_cursor_advance(&cc_in, len);
			} else {
				len = blksz;
				crypto_cursor_copydata(&cc_in, len, blk);
				inblk = blk;
			}
			exf->update(ctx, inblk, len);
		}
		if (resid > 0) {
			memset(blk, 0, blksz);
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->update(ctx, blk, blksz);
		}
	}

	/* Do encryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		inblk = crypto_cursor_segment(&cc_in, &len);
		if (len < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else {
			crypto_cursor_advance(&cc_in, blksz);
		}
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			outblk = crypto_cursor_segment(&cc_out, &len);
			if (len < blksz)
				outblk = blk;
			exf->encrypt(ctx, inblk, outblk);
			exf->update(ctx, outblk, blksz);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			exf->update(ctx, inblk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
		exf->update(ctx, blk, resid);
	}

	/* length block */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_aad_length * 8);
	blkp = (uint32_t *)blk + 3;
	*blkp = htobe32(crp->crp_payload_length * 8);
	exf->update(ctx, blk, blksz);
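	/*
	 * This is GCM's closing block, be64(aadbits) || be64(ctbits);
	 * the two htobe32 stores fill the low halves of both length
	 * fields and the remaining bytes stay zero.
	 */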

	/* Finalize MAC */
	exf->final(tag, ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		u_char tag2[GMAC_DIGEST_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2);

		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
		explicit_bzero(tag2, sizeof(tag2));
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		     resid -= blksz) {
			inblk = crypto_cursor_segment(&cc_in, &len);
			if (len < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else
				crypto_cursor_advance(&cc_in, blksz);
			outblk = crypto_cursor_segment(&cc_out, &len);
			if (len < blksz)
				outblk = blk;
			exf->decrypt(ctx, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}

out:
	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));

	return (error);
}

static void
build_ccm_b0(const char *nonce, u_int nonce_length, u_int aad_length,
    u_int data_length, u_int tag_length, uint8_t *b0)
{
	uint8_t *bp;
	uint8_t flags, L;

	KASSERT(nonce_length >= 7 && nonce_length <= 13,
	    ("nonce_length must be between 7 and 13 bytes"));

	/*
	 * Need to determine the L field value.  This is the number of
	 * bytes needed to specify the length of the message; the length
	 * is whatever is left in the 16 bytes after specifying flags and
	 * the nonce.
	 */
	L = 15 - nonce_length;

	flags = ((aad_length > 0) << 6) +
	    (((tag_length - 2) / 2) << 3) +
	    L - 1;
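	/*
	 * E.g. a 12-byte nonce with a 16-byte tag and AAD present
	 * gives L = 3 and flags = 0x40 + (7 << 3) + 2 = 0x7a.
	 */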

	/*
	 * Now we need to set up the first block, which has flags, nonce,
	 * and the message length.
	 */
	b0[0] = flags;
	memcpy(b0 + 1, nonce, nonce_length);
	bp = b0 + 1 + nonce_length;
	/* Copy the L [i.e. 15 - nonce_length] bytes of data_length. */
	for (uint8_t *dst = b0 + CCM_CBC_BLOCK_LEN - 1; dst >= bp; dst--) {
		*dst = data_length;
		data_length >>= 8;
	}
}

/* NB: OCF only supports AAD lengths < 2^32. */
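/*
 * Per the CCM spec (RFC 3610 section 2.2), AAD lengths below 0xff00
 * are encoded in two bytes; larger 32-bit lengths get the 0xff 0xfe
 * marker followed by a four-byte length.
 */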
static int
build_ccm_aad_length(u_int aad_length, uint8_t *blk)
{
	if (aad_length < ((1 << 16) - (1 << 8))) {
		be16enc(blk, aad_length);
		return (sizeof(uint16_t));
	} else {
		blk[0] = 0xff;
		blk[1] = 0xfe;
		be32enc(blk + 2, aad_length);
		return (2 + sizeof(uint32_t));
	}
}

static int
swcr_ccm_cbc_mac(const struct swcr_session *ses, struct cryptop *crp)
{
	u_char iv[AES_BLOCK_LEN];
	u_char blk[CCM_CBC_BLOCK_LEN];
	u_char tag[AES_CBC_MAC_HASH_LEN];
	union authctx ctx;
	const struct crypto_session_params *csp;
	const struct swcr_auth *swa;
	const struct auth_hash *axf;
	int error, ivlen, len;

	csp = crypto_get_params(crp->crp_session);
	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	if (crp->crp_auth_key != NULL) {
		axf->Init(&ctx);
		axf->Setkey(&ctx, crp->crp_auth_key, csp->csp_auth_klen);
	} else
		memcpy(&ctx, swa->sw_ictx, axf->ctxsize);

	/* Initialize the IV */
	ivlen = csp->csp_ivlen;
	crypto_read_iv(crp, iv);

	/* Supply MAC with IV */
	axf->Reinit(&ctx, crp->crp_iv, ivlen);

	/* Supply MAC with b0. */
	build_ccm_b0(crp->crp_iv, ivlen, crp->crp_payload_length, 0,
	    swa->sw_mlen, blk);
	axf->Update(&ctx, blk, CCM_CBC_BLOCK_LEN);

	len = build_ccm_aad_length(crp->crp_payload_length, blk);
	axf->Update(&ctx, blk, len);

	crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length,
	    axf->Update, &ctx);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	error = 0;
	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char tag2[AES_CBC_MAC_HASH_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);
		if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
		explicit_bzero(tag2, sizeof(tag2));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(blk, sizeof(blk));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}

static int
swcr_ccm(const struct swcr_session *ses, struct cryptop *crp)
{
	const struct crypto_session_params *csp;
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[AES_CBC_MAC_HASH_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	const struct swcr_auth *swa;
	const struct swcr_encdec *swe;
	const struct enc_xform *exf;
	void *ctx;
	size_t len;
	int blksz, error, ivlen, r, resid;

	csp = crypto_get_params(crp->crp_session);
	swa = &ses->swcr_auth;
	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	blksz = AES_BLOCK_LEN;
	KASSERT(blksz == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if (crp->crp_payload_length > ccm_max_payload_length(csp))
		return (EMSGSIZE);

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	ivlen = csp->csp_ivlen;

	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL)
		exf->setkey(ctx, crp->crp_cipher_key,
		    crypto_get_params(crp->crp_session)->csp_cipher_klen);
	else
		memcpy(ctx, swe->sw_ctx, exf->ctxsize);
	exf->reinit(ctx, crp->crp_iv, ivlen);

	/* Supply MAC with b0. */
	_Static_assert(sizeof(blkbuf) >= CCM_CBC_BLOCK_LEN,
	    "blkbuf too small for b0");
	build_ccm_b0(crp->crp_iv, ivlen, crp->crp_aad_length,
	    crp->crp_payload_length, swa->sw_mlen, blk);
	exf->update(ctx, blk, CCM_CBC_BLOCK_LEN);

	/* Supply MAC with AAD */
	if (crp->crp_aad_length != 0) {
		len = build_ccm_aad_length(crp->crp_aad_length, blk);
		exf->update(ctx, blk, len);
		if (crp->crp_aad != NULL)
			exf->update(ctx, crp->crp_aad, crp->crp_aad_length);
		else
			crypto_apply(crp, crp->crp_aad_start,
			    crp->crp_aad_length, exf->update, ctx);

		/* Pad the AAD (including length field) to a full block. */
		len = (len + crp->crp_aad_length) % CCM_CBC_BLOCK_LEN;
		if (len != 0) {
			len = CCM_CBC_BLOCK_LEN - len;
			memset(blk, 0, CCM_CBC_BLOCK_LEN);
			exf->update(ctx, blk, len);
		}
	}

	/* Do encryption/decryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		inblk = crypto_cursor_segment(&cc_in, &len);
		if (len < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else
			crypto_cursor_advance(&cc_in, blksz);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			outblk = crypto_cursor_segment(&cc_out, &len);
			if (len < blksz)
				outblk = blk;
			exf->update(ctx, inblk, blksz);
			exf->encrypt(ctx, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			/*
			 * One of the problems with CCM+CBC is that
			 * the authentication is done on the
			 * unencrypted data.  As a result, we have to
			 * decrypt the data twice: once to generate
			 * the tag and a second time after the tag is
			 * verified.
			 */
			exf->decrypt(ctx, inblk, blk);
			exf->update(ctx, blk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->update(ctx, blk, resid);
			exf->encrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		} else {
			exf->decrypt_last(ctx, blk, blk, resid);
			exf->update(ctx, blk, resid);
		}
	}

	/* Finalize MAC */
	exf->final(tag, ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		u_char tag2[AES_CBC_MAC_HASH_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);

		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
		explicit_bzero(tag2, sizeof(tag2));
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		exf->reinit(ctx, crp->crp_iv, ivlen);
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		     resid -= blksz) {
			inblk = crypto_cursor_segment(&cc_in, &len);
			if (len < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else
				crypto_cursor_advance(&cc_in, blksz);
			outblk = crypto_cursor_segment(&cc_out, &len);
			if (len < blksz)
				outblk = blk;
			exf->decrypt(ctx, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}

out:
	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	return (error);
}

static int
swcr_chacha20_poly1305(const struct swcr_session *ses, struct cryptop *crp)
{
	const struct crypto_session_params *csp;
	uint64_t blkbuf[howmany(CHACHA20_NATIVE_BLOCK_LEN, sizeof(uint64_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[POLY1305_HASH_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	uint64_t *blkp;
	const struct swcr_auth *swa;
	const struct swcr_encdec *swe;
	const struct enc_xform *exf;
	void *ctx;
	size_t len;
	int blksz, error, r, resid;

	swa = &ses->swcr_auth;
	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	blksz = exf->native_blocksize;
	KASSERT(blksz <= sizeof(blkbuf), ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	csp = crypto_get_params(crp->crp_session);

	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL)
		exf->setkey(ctx, crp->crp_cipher_key,
		    csp->csp_cipher_klen);
	else
		memcpy(ctx, swe->sw_ctx, exf->ctxsize);
	exf->reinit(ctx, crp->crp_iv, csp->csp_ivlen);

	/* Supply MAC with AAD */
	if (crp->crp_aad != NULL)
		exf->update(ctx, crp->crp_aad, crp->crp_aad_length);
	else
		crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
		    exf->update, ctx);
	if (crp->crp_aad_length % 16 != 0) {
		/* padding1 */
		memset(blk, 0, 16);
		exf->update(ctx, blk, 16 - crp->crp_aad_length % 16);
	}
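	/*
	 * (RFC 8439 zero-pads both the AAD and the ciphertext to
	 * 16-byte Poly1305 block boundaries before MAC'ing.)
	 */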

	/* Do encryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		inblk = crypto_cursor_segment(&cc_in, &len);
		if (len < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else
			crypto_cursor_advance(&cc_in, blksz);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			outblk = crypto_cursor_segment(&cc_out, &len);
			if (len < blksz)
				outblk = blk;
			exf->encrypt(ctx, inblk, outblk);
			exf->update(ctx, outblk, blksz);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			exf->update(ctx, inblk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
		exf->update(ctx, blk, resid);
		if (resid % 16 != 0) {
			/* padding2 */
			memset(blk, 0, 16);
			exf->update(ctx, blk, 16 - resid % 16);
		}
	}

	/* lengths */
	blkp = (uint64_t *)blk;
	blkp[0] = htole64(crp->crp_aad_length);
	blkp[1] = htole64(crp->crp_payload_length);
	exf->update(ctx, blk, sizeof(uint64_t) * 2);
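	/*
	 * Unlike GCM, RFC 8439 appends the AAD and ciphertext lengths
	 * as little-endian 64-bit byte counts.
	 */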

	/* Finalize MAC */
	exf->final(tag, ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		u_char tag2[POLY1305_HASH_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2);

		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
		explicit_bzero(tag2, sizeof(tag2));
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		     resid -= blksz) {
			inblk = crypto_cursor_segment(&cc_in, &len);
			if (len < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else
				crypto_cursor_advance(&cc_in, blksz);
			outblk = crypto_cursor_segment(&cc_out, &len);
			if (len < blksz)
				outblk = blk;
			exf->decrypt(ctx, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}

out:
	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	return (error);
}

/*
 * Apply a cipher and a digest to perform EtA.
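 * (Encrypt-then-authenticate: the digest covers the ciphertext, so
 * encryption runs the cipher before the MAC, while decryption checks
 * the digest before running the cipher.)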
 */
static int
swcr_eta(const struct swcr_session *ses, struct cryptop *crp)
{
	int error;

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		error = swcr_encdec(ses, crp);
		if (error == 0)
			error = swcr_authcompute(ses, crp);
	} else {
		error = swcr_authcompute(ses, crp);
		if (error == 0)
			error = swcr_encdec(ses, crp);
	}
	return (error);
}

/*
 * Apply a compression/decompression algorithm.
 */
static int
swcr_compdec(const struct swcr_session *ses, struct cryptop *crp)
{
	const struct comp_algo *cxf;
	uint8_t *data, *out;
	int adj;
	uint32_t result;

	cxf = ses->swcr_compdec.sw_cxf;

	/*
	 * We must process the whole buffer of data at once, so if the
	 * data is not contiguous in the request's buffer, copy it into
	 * a local buffer first.
	 */

	data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
	    data);

	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
		result = cxf->compress(data, crp->crp_payload_length, &out);
	else
		result = cxf->decompress(data, crp->crp_payload_length, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return (EINVAL);
	crp->crp_olen = result;

	/* Check the compressed size when doing compression */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
		if (result >= crp->crp_payload_length) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return (0);
		}
	}

	/*
	 * Copy back the (de)compressed data.  m_copyback will extend
	 * the mbuf as necessary.
	 */
	crypto_copyback(crp, crp->crp_payload_start, result, out);
	if (result < crp->crp_payload_length) {
		switch (crp->crp_buf.cb_type) {
		case CRYPTO_BUF_MBUF:
		case CRYPTO_BUF_SINGLE_MBUF:
			adj = result - crp->crp_payload_length;
			m_adj(crp->crp_buf.cb_mbuf, adj);
			break;
		case CRYPTO_BUF_UIO: {
			struct uio *uio = crp->crp_buf.cb_uio;
			int ind;

			adj = crp->crp_payload_length - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
			}
			break;
		case CRYPTO_BUF_VMPAGE:
			adj = crp->crp_payload_length - result;
			crp->crp_buf.cb_vm_page_len -= adj;
			break;
		default:
			break;
		}
	}
	free(out, M_CRYPTO_DATA);
	return (0);
}

static int
swcr_setup_cipher(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	const struct enc_xform *txf;
	int error;

	swe = &ses->swcr_encdec;
	txf = crypto_cipher(csp);
	if (csp->csp_cipher_key != NULL) {
		if (txf->ctxsize != 0) {
			swe->sw_ctx = malloc(txf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swe->sw_ctx == NULL)
				return (ENOMEM);
		}
		error = txf->setkey(swe->sw_ctx,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;
	return (0);
}

static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	const struct auth_hash *axf;

	swa = &ses->swcr_auth;

	axf = crypto_auth_hash(csp);
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL) {
		swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_ictx == NULL)
			return (ENOBUFS);
	}

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		swa->sw_hmac = true;
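		/*
		 * Precompute the keyed inner and outer contexts here so
		 * each request only has to memcpy them (see
		 * swcr_authcompute()).
		 */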
		if (csp->csp_auth_key != NULL) {
			swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swa->sw_octx == NULL)
				return (ENOBUFS);
			hmac_init_ipad(axf, csp->csp_auth_key,
			    csp->csp_auth_klen, swa->sw_ictx);
			hmac_init_opad(axf, csp->csp_auth_key,
			    csp->csp_auth_klen, swa->sw_octx);
		}
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Init(swa->sw_ictx);
		break;
	case CRYPTO_AES_NIST_GMAC:
	case CRYPTO_AES_CCM_CBC_MAC:
	case CRYPTO_POLY1305:
		if (csp->csp_auth_key != NULL) {
			axf->Init(swa->sw_ictx);
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}
		break;
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		/*
		 * Blake2b and Blake2s support an optional key but do
		 * not require one.
		 */
		if (csp->csp_auth_klen == 0)
			axf->Init(swa->sw_ictx);
		else if (csp->csp_auth_key != NULL)
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		break;
	}

	if (csp->csp_mode == CSP_MODE_DIGEST) {
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
			ses->swcr_process = swcr_gmac;
			break;
		case CRYPTO_AES_CCM_CBC_MAC:
			ses->swcr_process = swcr_ccm_cbc_mac;
			break;
		default:
			ses->swcr_process = swcr_authcompute;
		}
	}

	return (0);
}

static int
swcr_setup_aead(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	int error;

	error = swcr_setup_cipher(ses, csp);
	if (error)
		return (error);

	swa = &ses->swcr_auth;
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = ses->swcr_encdec.sw_exf->macsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	return (0);
}

static bool
swcr_auth_supported(const struct crypto_session_params *csp)
{
	const struct auth_hash *axf;

	axf = crypto_auth_hash(csp);
	if (axf == NULL)
		return (false);
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		break;
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_GCM_IV_LEN)
			return (false);
		break;
	case CRYPTO_POLY1305:
		if (csp->csp_auth_klen != POLY1305_KEY_LEN)
			return (false);
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		break;
	}
	return (true);
}

static bool
swcr_cipher_supported(const struct crypto_session_params *csp)
{
	const struct enc_xform *txf;

	txf = crypto_cipher(csp);
	if (txf == NULL)
		return (false);
	if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
	    txf->ivsize != csp->csp_ivlen)
		return (false);
	return (true);
}

#define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)

static int
swcr_probesession(device_t dev, const struct crypto_session_params *csp)
{
	if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			return (EINVAL);
		default:
			if (!swcr_cipher_supported(csp))
				return (EINVAL);
			break;
		}
		break;
	case CSP_MODE_DIGEST:
		if (!swcr_auth_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			switch (csp->csp_cipher_klen * 8) {
			case 128:
			case 192:
			case 256:
				break;
			default:
				return (EINVAL);
			}
			break;
		case CRYPTO_CHACHA20_POLY1305:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		/* AEAD algorithms cannot be used for EtA. */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			return (EINVAL);
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			return (EINVAL);
		}

		if (!swcr_cipher_supported(csp) ||
		    !swcr_auth_supported(csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_SOFTWARE);
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct swcr_session *ses;
	const struct comp_algo *cxf;
	int error;

	ses = crypto_get_driver_session(cses);

	error = 0;
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			break;
#ifdef INVARIANTS
		default:
			panic("bad compression algo");
#endif
		}
		ses->swcr_compdec.sw_cxf = cxf;
		ses->swcr_process = swcr_compdec;
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_NULL_CBC:
			ses->swcr_process = swcr_null;
			break;
#ifdef INVARIANTS
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			panic("bad cipher algo");
#endif
		default:
			error = swcr_setup_cipher(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_encdec;
		}
		break;
	case CSP_MODE_DIGEST:
		error = swcr_setup_auth(ses, csp);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			error = swcr_setup_aead(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_gcm;
			break;
		case CRYPTO_AES_CCM_16:
			error = swcr_setup_aead(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_ccm;
			break;
		case CRYPTO_CHACHA20_POLY1305:
			error = swcr_setup_aead(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_chacha20_poly1305;
			break;
#ifdef INVARIANTS
		default:
			panic("bad aead algo");
#endif
		}
		break;
	case CSP_MODE_ETA:
#ifdef INVARIANTS
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			panic("bad eta cipher algo");
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			panic("bad eta auth algo");
		}
#endif

		error = swcr_setup_auth(ses, csp);
		if (error)
			break;
		if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
			/* Effectively degrade to digest mode. */
			ses->swcr_process = swcr_authcompute;
			break;
		}

		error = swcr_setup_cipher(ses, csp);
		if (error == 0)
			ses->swcr_process = swcr_eta;
		break;
	default:
		error = EINVAL;
	}

	if (error)
		swcr_freesession(dev, cses);
	return (error);
}

static void
swcr_freesession(device_t dev, crypto_session_t cses)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(cses);

	zfree(ses->swcr_encdec.sw_ctx, M_CRYPTO_DATA);
	zfree(ses->swcr_auth.sw_ictx, M_CRYPTO_DATA);
	zfree(ses->swcr_auth.sw_octx, M_CRYPTO_DATA);
}

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(crp->crp_session);

	crp->crp_etype = ses->swcr_process(ses, crp);

	crypto_done(crp);
	return (0);
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	device_quiet(dev);
	return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{

	swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return (ENXIO);
	}

	return (0);
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	return (0);
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_probesession, swcr_probesession),
	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
1649