/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014-2021 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Portions of this software were developed by Ararat River
 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/endian.h>
#include <sys/limits.h>

#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

struct swcr_auth {
	void		*sw_ictx;
	void		*sw_octx;
	const struct auth_hash *sw_axf;
	uint16_t	sw_mlen;
	bool		sw_hmac;
};

struct swcr_encdec {
	void		*sw_ctx;
	const struct enc_xform *sw_exf;
};

struct swcr_compdec {
	const struct comp_algo *sw_cxf;
};

struct swcr_session {
	int	(*swcr_process)(const struct swcr_session *, struct cryptop *);

	struct swcr_auth swcr_auth;
	struct swcr_encdec swcr_encdec;
	struct swcr_compdec swcr_compdec;
};
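
/*
 * Each session records, at setup time, the handler that implements its
 * mode (cipher, digest, AEAD, EtA, or compression) in swcr_process;
 * request processing then dispatches through this pointer without
 * re-examining the session parameters.
 */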

static	int32_t swcr_id;

static	void swcr_freesession(device_t dev, crypto_session_t cses);

/* Used for CRYPTO_NULL_CBC. */
static int
swcr_null(const struct swcr_session *ses, struct cryptop *crp)
{

	return (0);
}

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(const struct swcr_session *ses, struct cryptop *crp)
{
	unsigned char blk[EALG_MAX_BLOCK_LEN];
	const struct crypto_session_params *csp;
	const struct enc_xform *exf;
	const struct swcr_encdec *sw;
	void *ctx;
	size_t inlen, outlen;
	int blks, resid;
	struct crypto_buffer_cursor cc_in, cc_out;
	const unsigned char *inblk;
	unsigned char *outblk;
	int error;
	bool encrypting;

	error = 0;

	sw = &ses->swcr_encdec;
	exf = sw->sw_exf;
	csp = crypto_get_params(crp->crp_session);

	if (exf->native_blocksize == 0) {
		/* Check for non-padded data */
		if ((crp->crp_payload_length % exf->blocksize) != 0)
			return (EINVAL);

		blks = exf->blocksize;
	} else
		blks = exf->native_blocksize;

	if (exf == &enc_xform_aes_icm &&
	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL) {
		error = exf->setkey(ctx, crp->crp_cipher_key,
		    csp->csp_cipher_klen);
		if (error)
			return (error);
	} else
		memcpy(ctx, sw->sw_ctx, exf->ctxsize);

	crypto_read_iv(crp, blk);
	exf->reinit(ctx, blk, csp->csp_ivlen);

	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	inblk = crypto_cursor_segment(&cc_in, &inlen);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	outblk = crypto_cursor_segment(&cc_out, &outlen);

	resid = crp->crp_payload_length;
	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);

	/*
	 * Loop through the blocks, encrypting or decrypting as
	 * requested.  'inlen' is the remaining length of the current
	 * segment in the input buffer; 'outlen' is the remaining length
	 * of the current segment in the output buffer.
	 */
	while (resid >= blks) {
		/*
		 * If the current block is not contained within the
		 * current input/output segment, use 'blk' as a local
		 * buffer.
		 */
		if (inlen < blks) {
			crypto_cursor_copydata(&cc_in, blks, blk);
			inblk = blk;
		}
		if (outlen < blks)
			outblk = blk;

		if (encrypting)
			exf->encrypt(ctx, inblk, outblk);
		else
			exf->decrypt(ctx, inblk, outblk);

		if (inlen < blks) {
			inblk = crypto_cursor_segment(&cc_in, &inlen);
		} else {
			crypto_cursor_advance(&cc_in, blks);
			inlen -= blks;
			inblk += blks;
		}

		if (outlen < blks) {
			crypto_cursor_copyback(&cc_out, blks, blk);
			outblk = crypto_cursor_segment(&cc_out, &outlen);
		} else {
			crypto_cursor_advance(&cc_out, blks);
			outlen -= blks;
			outblk += blks;
		}

		resid -= blks;
	}

	/* Handle trailing partial block for stream ciphers. */
	if (resid > 0) {
		KASSERT(exf->native_blocksize != 0,
		    ("%s: partial block of %d bytes for cipher %s",
		    __func__, resid, exf->name));
		KASSERT(resid < blks, ("%s: partial block too big", __func__));

		inblk = crypto_cursor_segment(&cc_in, &inlen);
		outblk = crypto_cursor_segment(&cc_out, &outlen);
		if (inlen < resid) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			inblk = blk;
		}
		if (outlen < resid)
			outblk = blk;
		if (encrypting)
			exf->encrypt_last(ctx, inblk, outblk, resid);
		else
			exf->decrypt_last(ctx, inblk, outblk, resid);
		if (outlen < resid)
			crypto_cursor_copyback(&cc_out, resid, blk);
	}

	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(blk, sizeof(blk));
	return (0);
}
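
/*
 * Illustrative note: the cursor logic above handles a cipher block that
 * straddles two non-contiguous segments (e.g. an mbuf chain).  With
 * 16-byte blocks and a segment boundary after 10 bytes:
 *
 *	segment 0: |<--- 10 bytes --->| segment 1: |<--- 6 bytes --->...
 *	block 0:   |<------------- 16 bytes ------------>|
 *
 * the block is assembled into (and, on output, written back from) the
 * local 'blk' buffer instead of being processed in place.
 */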

/*
 * Compute or verify hash.
 */
static int
swcr_authcompute(const struct swcr_session *ses, struct cryptop *crp)
{
	struct {
		union authctx ctx;
		u_char aalg[HASH_MAX_LEN];
		u_char uaalg[HASH_MAX_LEN];
	} s;
	const struct crypto_session_params *csp;
	const struct swcr_auth *sw;
	const struct auth_hash *axf;
	int err;

	sw = &ses->swcr_auth;

	axf = sw->sw_axf;

	csp = crypto_get_params(crp->crp_session);
	if (crp->crp_auth_key != NULL) {
		if (sw->sw_hmac) {
			hmac_init_ipad(axf, crp->crp_auth_key,
			    csp->csp_auth_klen, &s.ctx);
		} else {
			axf->Init(&s.ctx);
			axf->Setkey(&s.ctx, crp->crp_auth_key,
			    csp->csp_auth_klen);
		}
	} else
		memcpy(&s.ctx, sw->sw_ictx, axf->ctxsize);

	if (crp->crp_aad != NULL)
		err = axf->Update(&s.ctx, crp->crp_aad, crp->crp_aad_length);
	else
		err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
		    axf->Update, &s.ctx);
	if (err)
		goto out;

	if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
	    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		err = crypto_apply_buf(&crp->crp_obuf,
		    crp->crp_payload_output_start, crp->crp_payload_length,
		    axf->Update, &s.ctx);
	else
		err = crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length, axf->Update, &s.ctx);
	if (err)
		goto out;

	if (csp->csp_flags & CSP_F_ESN)
		axf->Update(&s.ctx, crp->crp_esn, 4);

	axf->Final(s.aalg, &s.ctx);
	if (sw->sw_hmac) {
		if (crp->crp_auth_key != NULL)
			hmac_init_opad(axf, crp->crp_auth_key,
			    csp->csp_auth_klen, &s.ctx);
		else
			memcpy(&s.ctx, sw->sw_octx, axf->ctxsize);
		axf->Update(&s.ctx, s.aalg, axf->hashsize);
		axf->Final(s.aalg, &s.ctx);
	}
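	/*
	 * s.aalg now holds the final digest.  For HMAC sessions this is
	 * H((K ^ opad) || H((K ^ ipad) || data)) per RFC 2104: the inner
	 * hash was finalized above and the outer hash applied from the
	 * (possibly precomputed) opad context.
	 */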

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, s.uaalg);
		if (timingsafe_bcmp(s.aalg, s.uaalg, sw->sw_mlen) != 0)
			err = EBADMSG;
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, s.aalg);
	}
out:
	explicit_bzero(&s, sizeof(s));
	return (err);
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */

static int
swcr_gmac(const struct swcr_session *ses, struct cryptop *crp)
{
	struct {
		union authctx ctx;
		uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
		u_char tag[GMAC_DIGEST_LEN];
		u_char tag2[GMAC_DIGEST_LEN];
	} s;
	u_char *blk = (u_char *)s.blkbuf;
	struct crypto_buffer_cursor cc;
	const u_char *inblk;
	const struct swcr_auth *swa;
	const struct auth_hash *axf;
	uint32_t *blkp;
	size_t len;
	int blksz, error, ivlen, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;
	blksz = GMAC_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	if (crp->crp_auth_key != NULL) {
		axf->Init(&s.ctx);
		axf->Setkey(&s.ctx, crp->crp_auth_key,
		    crypto_get_params(crp->crp_session)->csp_auth_klen);
	} else
		memcpy(&s.ctx, swa->sw_ictx, axf->ctxsize);

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	crypto_read_iv(crp, blk);

	axf->Reinit(&s.ctx, blk, ivlen);
	crypto_cursor_init(&cc, &crp->crp_buf);
	crypto_cursor_advance(&cc, crp->crp_payload_start);
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= len) {
		inblk = crypto_cursor_segment(&cc, &len);
		if (len >= blksz) {
			len = rounddown(MIN(len, resid), blksz);
			crypto_cursor_advance(&cc, len);
		} else {
			len = blksz;
			crypto_cursor_copydata(&cc, len, blk);
			inblk = blk;
		}
		axf->Update(&s.ctx, inblk, len);
	}
	if (resid > 0) {
		memset(blk, 0, blksz);
		crypto_cursor_copydata(&cc, resid, blk);
		axf->Update(&s.ctx, blk, blksz);
	}

	/* length block */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&s.ctx, blk, blksz);
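	/*
	 * The block hashed above is the GHASH length block: two
	 * big-endian 64-bit bit counts, len(AAD) || len(C).  GMAC only
	 * authenticates, so the payload counts as AAD and len(C) is
	 * zero; storing the 32-bit count at word index 1 fills the low
	 * half of the first big-endian 64-bit field, which suffices
	 * because OCF lengths fit in 32 bits (see the CTASSERTs above).
	 */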

	/* Finalize MAC */
	axf->Final(s.tag, &s.ctx);

	error = 0;
	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag2);
		if (timingsafe_bcmp(s.tag, s.tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, s.tag);
	}
	explicit_bzero(&s, sizeof(s));
	return (error);
}

static int
swcr_gcm(const struct swcr_session *ses, struct cryptop *crp)
{
	struct {
		uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
		u_char tag[GMAC_DIGEST_LEN];
		u_char tag2[GMAC_DIGEST_LEN];
	} s;
	u_char *blk = (u_char *)s.blkbuf;
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	const struct swcr_auth *swa;
	const struct swcr_encdec *swe;
	const struct enc_xform *exf;
	void *ctx;
	uint32_t *blkp;
	size_t len;
	int blksz, error, ivlen, r, resid;

	swa = &ses->swcr_auth;
	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	blksz = GMAC_BLOCK_LEN;
	KASSERT(blksz == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	ivlen = AES_GCM_IV_LEN;

	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL)
		exf->setkey(ctx, crp->crp_cipher_key,
		    crypto_get_params(crp->crp_session)->csp_cipher_klen);
	else
		memcpy(ctx, swe->sw_ctx, exf->ctxsize);
	exf->reinit(ctx, crp->crp_iv, ivlen);

	/* Supply MAC with AAD */
	if (crp->crp_aad != NULL) {
		len = rounddown(crp->crp_aad_length, blksz);
		if (len != 0)
			exf->update(ctx, crp->crp_aad, len);
		if (crp->crp_aad_length != len) {
			memset(blk, 0, blksz);
			memcpy(blk, (char *)crp->crp_aad + len,
			    crp->crp_aad_length - len);
			exf->update(ctx, blk, blksz);
		}
	} else {
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_aad_start);
		for (resid = crp->crp_aad_length; resid >= blksz;
		     resid -= len) {
			inblk = crypto_cursor_segment(&cc_in, &len);
			if (len >= blksz) {
				len = rounddown(MIN(len, resid), blksz);
				crypto_cursor_advance(&cc_in, len);
			} else {
				len = blksz;
				crypto_cursor_copydata(&cc_in, len, blk);
				inblk = blk;
			}
			exf->update(ctx, inblk, len);
		}
		if (resid > 0) {
			memset(blk, 0, blksz);
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->update(ctx, blk, blksz);
		}
	}

	/* Do encryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		inblk = crypto_cursor_segment(&cc_in, &len);
		if (len < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else {
			crypto_cursor_advance(&cc_in, blksz);
		}
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			outblk = crypto_cursor_segment(&cc_out, &len);
			if (len < blksz)
				outblk = blk;
			exf->encrypt(ctx, inblk, outblk);
			exf->update(ctx, outblk, blksz);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			exf->update(ctx, inblk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
		exf->update(ctx, blk, resid);
	}

	/* length block */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_aad_length * 8);
	blkp = (uint32_t *)blk + 3;
	*blkp = htobe32(crp->crp_payload_length * 8);
	exf->update(ctx, blk, blksz);
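	/*
	 * The block hashed above is the GHASH length block: len(AAD)
	 * and len(C) as big-endian 64-bit bit counts.  Word indices 1
	 * and 3 are the low halves of the two 64-bit fields; the high
	 * halves stay zero because OCF lengths fit in 32 bits (see the
	 * CTASSERTs above).
	 */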

	/* Finalize MAC */
	exf->final(s.tag, ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag2);
		r = timingsafe_bcmp(s.tag, s.tag2, swa->sw_mlen);
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		     resid -= blksz) {
			inblk = crypto_cursor_segment(&cc_in, &len);
			if (len < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else
				crypto_cursor_advance(&cc_in, blksz);
			outblk = crypto_cursor_segment(&cc_out, &len);
			if (len < blksz)
				outblk = blk;
			exf->decrypt(ctx, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag);
	}

out:
	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(&s, sizeof(s));

	return (error);
}

static void
build_ccm_b0(const char *nonce, u_int nonce_length, u_int aad_length,
    u_int data_length, u_int tag_length, uint8_t *b0)
{
	uint8_t *bp;
	uint8_t flags, L;

	KASSERT(nonce_length >= 7 && nonce_length <= 13,
	    ("nonce_length must be between 7 and 13 bytes"));

	/*
	 * Need to determine the L field value.  This is the number of
	 * bytes needed to specify the length of the message; the length
	 * is whatever is left in the 16 bytes after specifying flags and
	 * the nonce.
	 */
	L = 15 - nonce_length;

	flags = ((aad_length > 0) << 6) +
	    (((tag_length - 2) / 2) << 3) +
	    L - 1;

	/*
	 * Now we need to set up the first block, which has flags, nonce,
	 * and the message length.
	 */
	b0[0] = flags;
	memcpy(b0 + 1, nonce, nonce_length);
	bp = b0 + 1 + nonce_length;
	/* Store data_length in the trailing L bytes, most significant first. */
	for (uint8_t *dst = b0 + CCM_CBC_BLOCK_LEN - 1; dst >= bp; dst--) {
		*dst = data_length;
		data_length >>= 8;
	}
}
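
/*
 * Worked example (illustrative): for a 12-byte nonce, a 16-byte tag,
 * and AAD present, L = 15 - 12 = 3, so
 * flags = (1 << 6) | (((16 - 2) / 2) << 3) | (3 - 1) = 0x7a, and b0 is
 * laid out as:
 *
 *	b0[0]      flags (0x7a)
 *	b0[1..12]  nonce
 *	b0[13..15] message length, most significant byte first
 */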

/* NB: OCF only supports AAD lengths < 2^32. */
static int
build_ccm_aad_length(u_int aad_length, uint8_t *blk)
{
	if (aad_length < ((1 << 16) - (1 << 8))) {
		be16enc(blk, aad_length);
		return (sizeof(uint16_t));
	} else {
		blk[0] = 0xff;
		blk[1] = 0xfe;
		be32enc(blk + 2, aad_length);
		return (2 + sizeof(uint32_t));
	}
}
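
/*
 * This matches the RFC 3610 a-field encoding: AAD lengths below 0xff00
 * are encoded in two big-endian bytes, while larger (32-bit) lengths
 * are prefixed with the marker bytes 0xff 0xfe and encoded in four
 * big-endian bytes.  For example, an 8-byte AAD encodes as 00 08, and
 * a 65536-byte AAD encodes as ff fe 00 01 00 00.
 */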

static int
swcr_ccm_cbc_mac(const struct swcr_session *ses, struct cryptop *crp)
{
	struct {
		union authctx ctx;
		u_char blk[CCM_CBC_BLOCK_LEN];
		u_char tag[AES_CBC_MAC_HASH_LEN];
		u_char tag2[AES_CBC_MAC_HASH_LEN];
	} s;
	const struct crypto_session_params *csp;
	const struct swcr_auth *swa;
	const struct auth_hash *axf;
	int error, ivlen, len;

	csp = crypto_get_params(crp->crp_session);
	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	if (crp->crp_auth_key != NULL) {
		axf->Init(&s.ctx);
		axf->Setkey(&s.ctx, crp->crp_auth_key, csp->csp_auth_klen);
	} else
		memcpy(&s.ctx, swa->sw_ictx, axf->ctxsize);

	/* Initialize the IV */
	ivlen = csp->csp_ivlen;

	/* Supply MAC with IV */
	axf->Reinit(&s.ctx, crp->crp_iv, ivlen);

	/* Supply MAC with b0. */
	build_ccm_b0(crp->crp_iv, ivlen, crp->crp_payload_length, 0,
	    swa->sw_mlen, s.blk);
	axf->Update(&s.ctx, s.blk, CCM_CBC_BLOCK_LEN);

	len = build_ccm_aad_length(crp->crp_payload_length, s.blk);
	axf->Update(&s.ctx, s.blk, len);

	crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length,
	    axf->Update, &s.ctx);

	/* Finalize MAC */
	axf->Final(s.tag, &s.ctx);

	error = 0;
	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag2);
		if (timingsafe_bcmp(s.tag, s.tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag);
	}
	explicit_bzero(&s, sizeof(s));
	return (error);
}

static int
swcr_ccm(const struct swcr_session *ses, struct cryptop *crp)
{
	const struct crypto_session_params *csp;
	struct {
		uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
		u_char tag[AES_CBC_MAC_HASH_LEN];
		u_char tag2[AES_CBC_MAC_HASH_LEN];
	} s;
	u_char *blk = (u_char *)s.blkbuf;
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	const struct swcr_auth *swa;
	const struct swcr_encdec *swe;
	const struct enc_xform *exf;
	void *ctx;
	size_t len;
	int blksz, error, ivlen, r, resid;

	csp = crypto_get_params(crp->crp_session);
	swa = &ses->swcr_auth;
	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	blksz = AES_BLOCK_LEN;
	KASSERT(blksz == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if (crp->crp_payload_length > ccm_max_payload_length(csp))
		return (EMSGSIZE);

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	ivlen = csp->csp_ivlen;

	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL)
		exf->setkey(ctx, crp->crp_cipher_key,
		    crypto_get_params(crp->crp_session)->csp_cipher_klen);
	else
		memcpy(ctx, swe->sw_ctx, exf->ctxsize);
	exf->reinit(ctx, crp->crp_iv, ivlen);

	/* Supply MAC with b0. */
	_Static_assert(sizeof(s.blkbuf) >= CCM_CBC_BLOCK_LEN,
	    "blkbuf too small for b0");
	build_ccm_b0(crp->crp_iv, ivlen, crp->crp_aad_length,
	    crp->crp_payload_length, swa->sw_mlen, blk);
	exf->update(ctx, blk, CCM_CBC_BLOCK_LEN);

	/* Supply MAC with AAD */
	if (crp->crp_aad_length != 0) {
		len = build_ccm_aad_length(crp->crp_aad_length, blk);
		exf->update(ctx, blk, len);
		if (crp->crp_aad != NULL)
			exf->update(ctx, crp->crp_aad, crp->crp_aad_length);
		else
			crypto_apply(crp, crp->crp_aad_start,
			    crp->crp_aad_length, exf->update, ctx);

		/* Pad the AAD (including length field) to a full block. */
		len = (len + crp->crp_aad_length) % CCM_CBC_BLOCK_LEN;
		if (len != 0) {
			len = CCM_CBC_BLOCK_LEN - len;
			memset(blk, 0, CCM_CBC_BLOCK_LEN);
			exf->update(ctx, blk, len);
		}
	}

	/* Do encryption/decryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		inblk = crypto_cursor_segment(&cc_in, &len);
		if (len < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else
			crypto_cursor_advance(&cc_in, blksz);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			outblk = crypto_cursor_segment(&cc_out, &len);
			if (len < blksz)
				outblk = blk;
			exf->update(ctx, inblk, blksz);
			exf->encrypt(ctx, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			/*
			 * One of the problems with CCM+CBC is that
			 * the authentication is done on the
			 * unencrypted data.  As a result, we have to
			 * decrypt the data twice: once to generate
			 * the tag and a second time after the tag is
			 * verified.
			 */
			exf->decrypt(ctx, inblk, blk);
			exf->update(ctx, blk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->update(ctx, blk, resid);
			exf->encrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		} else {
			exf->decrypt_last(ctx, blk, blk, resid);
			exf->update(ctx, blk, resid);
		}
	}

	/* Finalize MAC */
	exf->final(s.tag, ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag2);
		r = timingsafe_bcmp(s.tag, s.tag2, swa->sw_mlen);
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		exf->reinit(ctx, crp->crp_iv, ivlen);
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		     resid -= blksz) {
			inblk = crypto_cursor_segment(&cc_in, &len);
			if (len < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else
				crypto_cursor_advance(&cc_in, blksz);
			outblk = crypto_cursor_segment(&cc_out, &len);
			if (len < blksz)
				outblk = blk;
			exf->decrypt(ctx, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag);
	}

out:
	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(&s, sizeof(s));
	return (error);
}

static int
swcr_chacha20_poly1305(const struct swcr_session *ses, struct cryptop *crp)
{
	const struct crypto_session_params *csp;
	struct {
		uint64_t blkbuf[howmany(CHACHA20_NATIVE_BLOCK_LEN, sizeof(uint64_t))];
		u_char tag[POLY1305_HASH_LEN];
		u_char tag2[POLY1305_HASH_LEN];
	} s;
	u_char *blk = (u_char *)s.blkbuf;
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	uint64_t *blkp;
	const struct swcr_auth *swa;
	const struct swcr_encdec *swe;
	const struct enc_xform *exf;
	void *ctx;
	size_t len;
	int blksz, error, r, resid;

	swa = &ses->swcr_auth;
	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	blksz = exf->native_blocksize;
	KASSERT(blksz <= sizeof(s.blkbuf),
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	csp = crypto_get_params(crp->crp_session);

	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL)
		exf->setkey(ctx, crp->crp_cipher_key,
		    csp->csp_cipher_klen);
	else
		memcpy(ctx, swe->sw_ctx, exf->ctxsize);
	exf->reinit(ctx, crp->crp_iv, csp->csp_ivlen);

	/* Supply MAC with AAD */
	if (crp->crp_aad != NULL)
		exf->update(ctx, crp->crp_aad, crp->crp_aad_length);
	else
		crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
		    exf->update, ctx);
	if (crp->crp_aad_length % POLY1305_BLOCK_LEN != 0) {
		/* padding1 */
		memset(blk, 0, POLY1305_BLOCK_LEN);
		exf->update(ctx, blk, POLY1305_BLOCK_LEN -
		    crp->crp_aad_length % POLY1305_BLOCK_LEN);
	}

	/* Do encryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		inblk = crypto_cursor_segment(&cc_in, &len);
		if (len < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else
			crypto_cursor_advance(&cc_in, blksz);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			outblk = crypto_cursor_segment(&cc_out, &len);
			if (len < blksz)
				outblk = blk;
			exf->encrypt(ctx, inblk, outblk);
			exf->update(ctx, outblk, blksz);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			exf->update(ctx, inblk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
		exf->update(ctx, blk, resid);
		if (resid % POLY1305_BLOCK_LEN != 0) {
			/* padding2 */
			memset(blk, 0, POLY1305_BLOCK_LEN);
			exf->update(ctx, blk, POLY1305_BLOCK_LEN -
			    resid % POLY1305_BLOCK_LEN);
		}
	}

	/* lengths */
	blkp = (uint64_t *)blk;
	blkp[0] = htole64(crp->crp_aad_length);
	blkp[1] = htole64(crp->crp_payload_length);
	exf->update(ctx, blk, sizeof(uint64_t) * 2);
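	/*
	 * The two 64-bit words just MACed are the RFC 8439 length block:
	 * the AAD length followed by the ciphertext length, both as
	 * little-endian byte counts (unlike GCM, which uses big-endian
	 * bit counts).
	 */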

	/* Finalize MAC */
	exf->final(s.tag, ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag2);
		r = timingsafe_bcmp(s.tag, s.tag2, swa->sw_mlen);
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		     resid -= blksz) {
			inblk = crypto_cursor_segment(&cc_in, &len);
			if (len < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else
				crypto_cursor_advance(&cc_in, blksz);
			outblk = crypto_cursor_segment(&cc_out, &len);
			if (len < blksz)
				outblk = blk;
			exf->decrypt(ctx, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag);
	}

out:
	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(&s, sizeof(s));
	return (error);
}

/*
 * Apply a cipher and a digest to perform EtA.
 */
static int
swcr_eta(const struct swcr_session *ses, struct cryptop *crp)
{
	int error;

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		error = swcr_encdec(ses, crp);
		if (error == 0)
			error = swcr_authcompute(ses, crp);
	} else {
		error = swcr_authcompute(ses, crp);
		if (error == 0)
			error = swcr_encdec(ses, crp);
	}
	return (error);
}

/*
 * Apply a compression/decompression algorithm.
 */
static int
swcr_compdec(const struct swcr_session *ses, struct cryptop *crp)
{
	const struct comp_algo *cxf;
	uint8_t *data, *out;
	int adj;
	uint32_t result;

	cxf = ses->swcr_compdec.sw_cxf;

	/*
	 * We must process the whole buffer of data at once.  If the
	 * data is not contiguous (e.g. spread across an mbuf chain),
	 * copy it into a temporary flat buffer first.
	 */

	data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
	    data);

	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
		result = cxf->compress(data, crp->crp_payload_length, &out);
	else
		result = cxf->decompress(data, crp->crp_payload_length, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return (EINVAL);
	crp->crp_olen = result;

	/* Check the compressed size when doing compression */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
		if (result >= crp->crp_payload_length) {
			/* Compression gained nothing; leave the data as-is. */
			free(out, M_CRYPTO_DATA);
			return (0);
		}
	}

	/*
	 * Copy back the (de)compressed data.  m_copyback extends the
	 * mbuf as necessary.
	 */
	crypto_copyback(crp, crp->crp_payload_start, result, out);
	if (result < crp->crp_payload_length) {
		switch (crp->crp_buf.cb_type) {
		case CRYPTO_BUF_MBUF:
		case CRYPTO_BUF_SINGLE_MBUF:
			adj = result - crp->crp_payload_length;
			m_adj(crp->crp_buf.cb_mbuf, adj);
			break;
		case CRYPTO_BUF_UIO: {
			struct uio *uio = crp->crp_buf.cb_uio;
			int ind;

			adj = crp->crp_payload_length - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
			}
			break;
		case CRYPTO_BUF_VMPAGE:
			adj = crp->crp_payload_length - result;
			crp->crp_buf.cb_vm_page_len -= adj;
			break;
		default:
			break;
		}
	}
	free(out, M_CRYPTO_DATA);
	return (0);
}

static int
swcr_setup_cipher(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	const struct enc_xform *txf;
	int error;

	swe = &ses->swcr_encdec;
	txf = crypto_cipher(csp);
	if (csp->csp_cipher_key != NULL) {
		if (txf->ctxsize != 0) {
			swe->sw_ctx = malloc(txf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swe->sw_ctx == NULL)
				return (ENOMEM);
		}
		error = txf->setkey(swe->sw_ctx,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;
	return (0);
}

static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	const struct auth_hash *axf;

	swa = &ses->swcr_auth;

	axf = crypto_auth_hash(csp);
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL) {
		swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_ictx == NULL)
			return (ENOBUFS);
	}

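	/*
	 * Precompute whatever key-dependent state is possible at session
	 * setup: HMAC modes store ready-to-clone inner (ipad) and outer
	 * (opad) contexts so per-request processing need not rerun the
	 * key schedule.
	 */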
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		swa->sw_hmac = true;
		if (csp->csp_auth_key != NULL) {
			swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swa->sw_octx == NULL)
				return (ENOBUFS);
			hmac_init_ipad(axf, csp->csp_auth_key,
			    csp->csp_auth_klen, swa->sw_ictx);
			hmac_init_opad(axf, csp->csp_auth_key,
			    csp->csp_auth_klen, swa->sw_octx);
		}
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
	case CRYPTO_NULL_HMAC:
		axf->Init(swa->sw_ictx);
		break;
	case CRYPTO_AES_NIST_GMAC:
	case CRYPTO_AES_CCM_CBC_MAC:
	case CRYPTO_POLY1305:
		if (csp->csp_auth_key != NULL) {
			axf->Init(swa->sw_ictx);
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}
		break;
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		/*
		 * Blake2b and Blake2s support an optional key but do
		 * not require one.
		 */
		if (csp->csp_auth_klen == 0)
			axf->Init(swa->sw_ictx);
		else if (csp->csp_auth_key != NULL)
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		break;
	}

	if (csp->csp_mode == CSP_MODE_DIGEST) {
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
			ses->swcr_process = swcr_gmac;
			break;
		case CRYPTO_AES_CCM_CBC_MAC:
			ses->swcr_process = swcr_ccm_cbc_mac;
			break;
		default:
			ses->swcr_process = swcr_authcompute;
		}
	}

	return (0);
}

static int
swcr_setup_aead(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	int error;

	error = swcr_setup_cipher(ses, csp);
	if (error)
		return (error);

	swa = &ses->swcr_auth;
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = ses->swcr_encdec.sw_exf->macsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	return (0);
}

static bool
swcr_auth_supported(const struct crypto_session_params *csp)
{
	const struct auth_hash *axf;

	axf = crypto_auth_hash(csp);
	if (axf == NULL)
		return (false);
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		break;
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_GCM_IV_LEN)
			return (false);
		break;
	case CRYPTO_POLY1305:
		if (csp->csp_auth_klen != POLY1305_KEY_LEN)
			return (false);
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		break;
	}
	return (true);
}

static bool
swcr_cipher_supported(const struct crypto_session_params *csp)
{
	const struct enc_xform *txf;

	txf = crypto_cipher(csp);
	if (txf == NULL)
		return (false);
	if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
	    txf->ivsize != csp->csp_ivlen)
		return (false);
	return (true);
}

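/*
 * Session flags this driver honors: callers may supply a separate
 * output buffer (CSP_F_SEPARATE_OUTPUT), pass AAD in a separate buffer
 * (CSP_F_SEPARATE_AAD), and request IPsec extended sequence number
 * handling (CSP_F_ESN).
 */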
#define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)

static int
swcr_probesession(device_t dev, const struct crypto_session_params *csp)
{
	if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			return (EINVAL);
		default:
			if (!swcr_cipher_supported(csp))
				return (EINVAL);
			break;
		}
		break;
	case CSP_MODE_DIGEST:
		if (!swcr_auth_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			switch (csp->csp_cipher_klen * 8) {
			case 128:
			case 192:
			case 256:
				break;
			default:
				return (EINVAL);
			}
			break;
		case CRYPTO_CHACHA20_POLY1305:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		/* AEAD algorithms cannot be used for EtA. */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			return (EINVAL);
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			return (EINVAL);
		}

		if (!swcr_cipher_supported(csp) ||
		    !swcr_auth_supported(csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_SOFTWARE);
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct swcr_session *ses;
	const struct comp_algo *cxf;
	int error;

	ses = crypto_get_driver_session(cses);

	error = 0;
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			break;
#ifdef INVARIANTS
		default:
			panic("bad compression algo");
#endif
		}
		ses->swcr_compdec.sw_cxf = cxf;
		ses->swcr_process = swcr_compdec;
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_NULL_CBC:
			ses->swcr_process = swcr_null;
			break;
#ifdef INVARIANTS
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			panic("bad cipher algo");
#endif
		default:
			error = swcr_setup_cipher(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_encdec;
		}
		break;
	case CSP_MODE_DIGEST:
		error = swcr_setup_auth(ses, csp);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			error = swcr_setup_aead(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_gcm;
			break;
		case CRYPTO_AES_CCM_16:
			error = swcr_setup_aead(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_ccm;
			break;
		case CRYPTO_CHACHA20_POLY1305:
			error = swcr_setup_aead(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_chacha20_poly1305;
			break;
#ifdef INVARIANTS
		default:
			panic("bad aead algo");
#endif
		}
		break;
	case CSP_MODE_ETA:
#ifdef INVARIANTS
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			panic("bad eta cipher algo");
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			panic("bad eta auth algo");
		}
#endif

		error = swcr_setup_auth(ses, csp);
		if (error)
			break;
		if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
			/* Effectively degrade to digest mode. */
			ses->swcr_process = swcr_authcompute;
			break;
		}

		error = swcr_setup_cipher(ses, csp);
		if (error == 0)
			ses->swcr_process = swcr_eta;
		break;
	default:
		error = EINVAL;
	}

	if (error)
		swcr_freesession(dev, cses);
	return (error);
}

static void
swcr_freesession(device_t dev, crypto_session_t cses)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(cses);

	zfree(ses->swcr_encdec.sw_ctx, M_CRYPTO_DATA);
	zfree(ses->swcr_auth.sw_ictx, M_CRYPTO_DATA);
	zfree(ses->swcr_auth.sw_octx, M_CRYPTO_DATA);
}

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(crp->crp_session);

	crp->crp_etype = ses->swcr_process(ses, crp);

	crypto_done(crp);
	return (0);
}
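
/*
 * Illustrative sketch (not part of this driver): a kernel consumer
 * reaches swcr_process() through the OCF front end roughly like this,
 * where 'cses' and 'crp' are hypothetical locals and error handling,
 * key setup, buffer description, and the completion callback are
 * omitted:
 *
 *	struct crypto_session_params csp = {
 *		.csp_mode = CSP_MODE_CIPHER,
 *		.csp_cipher_alg = CRYPTO_AES_CBC,
 *		.csp_cipher_klen = 32,
 *		.csp_ivlen = 16,
 *		...
 *	};
 *	crypto_newsession(&cses, &csp, CRYPTOCAP_F_SOFTWARE);
 *	crp = crypto_getreq(cses, M_WAITOK);
 *	crp->crp_op = CRYPTO_OP_ENCRYPT;
 *	...
 *	crypto_dispatch(crp);
 */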

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	device_quiet(dev);
	return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{

	swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return (ENXIO);
	}

	return (0);
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	return (0);
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_probesession, swcr_probesession),
	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);