/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014-2021 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Portions of this software were developed by Ararat River
 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/endian.h>
#include <sys/limits.h>

#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

struct swcr_auth {
	void		*sw_ictx;
	void		*sw_octx;
	const struct auth_hash *sw_axf;
	uint16_t	sw_mlen;
	bool		sw_hmac;
};

struct swcr_encdec {
	void		*sw_ctx;
	const struct enc_xform *sw_exf;
};

struct swcr_compdec {
	const struct comp_algo *sw_cxf;
};

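/*
 * Each session dispatches requests through swcr_process, which
 * swcr_newsession() points at the handler matching the session mode
 * (cipher, digest, AEAD, EtA, or compression).
 */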
struct swcr_session {
	int	(*swcr_process)(const struct swcr_session *, struct cryptop *);

	struct swcr_auth swcr_auth;
	struct swcr_encdec swcr_encdec;
	struct swcr_compdec swcr_compdec;
};

static	int32_t swcr_id;

static	void swcr_freesession(device_t dev, crypto_session_t cses);

/* Used for CRYPTO_NULL_CBC. */
static int
swcr_null(const struct swcr_session *ses, struct cryptop *crp)
{

	return (0);
}

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(const struct swcr_session *ses, struct cryptop *crp)
{
	unsigned char blk[EALG_MAX_BLOCK_LEN];
	const struct crypto_session_params *csp;
	const struct enc_xform *exf;
	const struct swcr_encdec *sw;
	void *ctx;
	size_t inlen, outlen, todo;
	int blksz, resid;
	struct crypto_buffer_cursor cc_in, cc_out;
	const unsigned char *inblk;
	unsigned char *outblk;
	int error;
	bool encrypting;

	error = 0;

	sw = &ses->swcr_encdec;
	exf = sw->sw_exf;
	csp = crypto_get_params(crp->crp_session);

	if (exf->native_blocksize == 0) {
		/* Check for non-padded data */
		if ((crp->crp_payload_length % exf->blocksize) != 0)
			return (EINVAL);

		blksz = exf->blocksize;
	} else
		blksz = exf->native_blocksize;

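	/* AES-ICM requests must supply the IV in crp_iv (CRYPTO_F_IV_SEPARATE). */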
	if (exf == &enc_xform_aes_icm &&
	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

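	/*
	 * The cipher context lives on the stack, so per-request key
	 * schedules never reach the heap; it is explicitly zeroed
	 * before returning.
	 */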
	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL) {
		error = exf->setkey(ctx, crp->crp_cipher_key,
		    csp->csp_cipher_klen);
		if (error)
			return (error);
	} else
		memcpy(ctx, sw->sw_ctx, exf->ctxsize);

	crypto_read_iv(crp, blk);
	exf->reinit(ctx, blk, csp->csp_ivlen);

	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;

	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);

	/*
	 * Loop over the blocks to be encrypted.  'inlen' is the
	 * remaining length of the current segment in the input buffer.
	 * 'outlen' is the remaining length of the current segment in
	 * the output buffer.
	 */
	inlen = outlen = 0;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= todo) {
		if (inlen == 0)
			inblk = crypto_cursor_segment(&cc_in, &inlen);
		if (outlen == 0)
			outblk = crypto_cursor_segment(&cc_out, &outlen);

		/*
		 * If the current block is not contained within the
		 * current input/output segment, use 'blk' as a local
		 * buffer.
		 */
		if (inlen < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
			inlen = blksz;
		}
		if (outlen < blksz) {
			outblk = blk;
			outlen = blksz;
		}

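		/*
		 * Operate on the longest run of complete blocks
		 * contained in both the current input and output
		 * segments.
		 */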
		todo = rounddown2(MIN(resid, MIN(inlen, outlen)), blksz);

		if (encrypting)
			exf->encrypt_multi(ctx, inblk, outblk, todo);
		else
			exf->decrypt_multi(ctx, inblk, outblk, todo);

		if (inblk == blk) {
			inblk = crypto_cursor_segment(&cc_in, &inlen);
		} else {
			crypto_cursor_advance(&cc_in, todo);
			inlen -= todo;
			inblk += todo;
		}

		if (outblk == blk) {
			crypto_cursor_copyback(&cc_out, blksz, blk);
			outblk = crypto_cursor_segment(&cc_out, &outlen);
		} else {
			crypto_cursor_advance(&cc_out, todo);
			outlen -= todo;
			outblk += todo;
		}
	}

	/* Handle trailing partial block for stream ciphers. */
	if (resid > 0) {
		KASSERT(exf->native_blocksize != 0,
		    ("%s: partial block of %d bytes for cipher %s",
		    __func__, resid, exf->name));
		KASSERT(resid < blksz, ("%s: partial block too big", __func__));

		inblk = crypto_cursor_segment(&cc_in, &inlen);
		outblk = crypto_cursor_segment(&cc_out, &outlen);
		if (inlen < resid) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			inblk = blk;
		}
		if (outlen < resid)
			outblk = blk;
		if (encrypting)
			exf->encrypt_last(ctx, inblk, outblk,
			    resid);
		else
			exf->decrypt_last(ctx, inblk, outblk,
			    resid);
		if (outlen < resid)
			crypto_cursor_copyback(&cc_out, resid, blk);
	}

	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(blk, sizeof(blk));
	return (0);
}

/*
 * Compute or verify hash.
 */
static int
swcr_authcompute(const struct swcr_session *ses, struct cryptop *crp)
{
	struct {
		union authctx ctx;
		u_char aalg[HASH_MAX_LEN];
		u_char uaalg[HASH_MAX_LEN];
	} s;
	const struct crypto_session_params *csp;
	const struct swcr_auth *sw;
	const struct auth_hash *axf;
	int err;

	sw = &ses->swcr_auth;

	axf = sw->sw_axf;

	csp = crypto_get_params(crp->crp_session);
	if (crp->crp_auth_key != NULL) {
		if (sw->sw_hmac) {
			hmac_init_ipad(axf, crp->crp_auth_key,
			    csp->csp_auth_klen, &s.ctx);
		} else {
			axf->Init(&s.ctx);
			axf->Setkey(&s.ctx, crp->crp_auth_key,
			    csp->csp_auth_klen);
		}
	} else
		memcpy(&s.ctx, sw->sw_ictx, axf->ctxsize);

	if (crp->crp_aad != NULL)
		err = axf->Update(&s.ctx, crp->crp_aad, crp->crp_aad_length);
	else
		err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
		    axf->Update, &s.ctx);
	if (err)
		goto out;

	if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
	    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		err = crypto_apply_buf(&crp->crp_obuf,
		    crp->crp_payload_output_start, crp->crp_payload_length,
		    axf->Update, &s.ctx);
	else
		err = crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length, axf->Update, &s.ctx);
	if (err)
		goto out;

	if (csp->csp_flags & CSP_F_ESN)
		axf->Update(&s.ctx, crp->crp_esn, 4);

	axf->Final(s.aalg, &s.ctx);
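	/*
	 * For HMAC, run the inner digest through the outer (opad)
	 * context: HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).
	 */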
	if (sw->sw_hmac) {
		if (crp->crp_auth_key != NULL)
			hmac_init_opad(axf, crp->crp_auth_key,
			    csp->csp_auth_klen, &s.ctx);
		else
			memcpy(&s.ctx, sw->sw_octx, axf->ctxsize);
		axf->Update(&s.ctx, s.aalg, axf->hashsize);
		axf->Final(s.aalg, &s.ctx);
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, s.uaalg);
		if (timingsafe_bcmp(s.aalg, s.uaalg, sw->sw_mlen) != 0)
			err = EBADMSG;
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, s.aalg);
	}
out:
	explicit_bzero(&s, sizeof(s));
	return (err);
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */

static int
swcr_gmac(const struct swcr_session *ses, struct cryptop *crp)
{
	struct {
		union authctx ctx;
		uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
		u_char tag[GMAC_DIGEST_LEN];
		u_char tag2[GMAC_DIGEST_LEN];
	} s;
	u_char *blk = (u_char *)s.blkbuf;
	struct crypto_buffer_cursor cc;
	const u_char *inblk;
	const struct swcr_auth *swa;
	const struct auth_hash *axf;
	uint32_t *blkp;
	size_t len;
	int blksz, error, ivlen, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;
	blksz = GMAC_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	if (crp->crp_auth_key != NULL) {
		axf->Init(&s.ctx);
		axf->Setkey(&s.ctx, crp->crp_auth_key,
		    crypto_get_params(crp->crp_session)->csp_auth_klen);
	} else
		memcpy(&s.ctx, swa->sw_ictx, axf->ctxsize);

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	crypto_read_iv(crp, blk);

	axf->Reinit(&s.ctx, blk, ivlen);
	crypto_cursor_init(&cc, &crp->crp_buf);
	crypto_cursor_advance(&cc, crp->crp_payload_start);
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= len) {
		inblk = crypto_cursor_segment(&cc, &len);
		if (len >= blksz) {
			len = rounddown(MIN(len, resid), blksz);
			crypto_cursor_advance(&cc, len);
		} else {
			len = blksz;
			crypto_cursor_copydata(&cc, len, blk);
			inblk = blk;
		}
		axf->Update(&s.ctx, inblk, len);
	}
	if (resid > 0) {
		memset(blk, 0, blksz);
		crypto_cursor_copydata(&cc, resid, blk);
		axf->Update(&s.ctx, blk, blksz);
	}

	/*
	 * Length block: the data is authenticated only, so its 64-bit
	 * big-endian bit count goes in the AAD-length field and the
	 * ciphertext length is left zero.
	 */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&s.ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(s.tag, &s.ctx);

	error = 0;
	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag2);
		if (timingsafe_bcmp(s.tag, s.tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, s.tag);
	}
	explicit_bzero(&s, sizeof(s));
	return (error);
}

static int
swcr_gcm(const struct swcr_session *ses, struct cryptop *crp)
{
	struct {
		uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
		u_char tag[GMAC_DIGEST_LEN];
		u_char tag2[GMAC_DIGEST_LEN];
	} s;
	u_char *blk = (u_char *)s.blkbuf;
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	size_t inlen, outlen, todo;
	const struct swcr_auth *swa;
	const struct swcr_encdec *swe;
	const struct enc_xform *exf;
	void *ctx;
	uint32_t *blkp;
	int blksz, error, ivlen, r, resid;

	swa = &ses->swcr_auth;
	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	blksz = GMAC_BLOCK_LEN;
	KASSERT(blksz == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	ivlen = AES_GCM_IV_LEN;

	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL)
		exf->setkey(ctx, crp->crp_cipher_key,
		    crypto_get_params(crp->crp_session)->csp_cipher_klen);
	else
		memcpy(ctx, swe->sw_ctx, exf->ctxsize);
	exf->reinit(ctx, crp->crp_iv, ivlen);

	/* Supply MAC with AAD */
	if (crp->crp_aad != NULL) {
		inlen = rounddown2(crp->crp_aad_length, blksz);
		if (inlen != 0)
			exf->update(ctx, crp->crp_aad, inlen);
		if (crp->crp_aad_length != inlen) {
			memset(blk, 0, blksz);
			memcpy(blk, (char *)crp->crp_aad + inlen,
			    crp->crp_aad_length - inlen);
			exf->update(ctx, blk, blksz);
		}
	} else {
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_aad_start);
		for (resid = crp->crp_aad_length; resid >= blksz;
		     resid -= inlen) {
			inblk = crypto_cursor_segment(&cc_in, &inlen);
			if (inlen >= blksz) {
				inlen = rounddown2(MIN(inlen, resid), blksz);
				crypto_cursor_advance(&cc_in, inlen);
			} else {
				inlen = blksz;
				crypto_cursor_copydata(&cc_in, inlen, blk);
				inblk = blk;
			}
			exf->update(ctx, inblk, inlen);
		}
		if (resid > 0) {
			memset(blk, 0, blksz);
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->update(ctx, blk, blksz);
		}
	}

	/* Do encryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;

	inlen = outlen = 0;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= todo) {
		if (inlen == 0)
			inblk = crypto_cursor_segment(&cc_in, &inlen);
		if (outlen == 0)
			outblk = crypto_cursor_segment(&cc_out, &outlen);

		if (inlen < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
			inlen = blksz;
		}

		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			if (outlen < blksz) {
				outblk = blk;
				outlen = blksz;
			}

			todo = rounddown2(MIN(resid, MIN(inlen, outlen)),
			    blksz);

			exf->encrypt_multi(ctx, inblk, outblk, todo);
			exf->update(ctx, outblk, todo);

			if (outblk == blk) {
				crypto_cursor_copyback(&cc_out, blksz, blk);
				outblk = crypto_cursor_segment(&cc_out, &outlen);
			} else {
				crypto_cursor_advance(&cc_out, todo);
				outlen -= todo;
				outblk += todo;
			}
		} else {
			todo = rounddown2(MIN(resid, inlen), blksz);
			exf->update(ctx, inblk, todo);
		}

		if (inblk == blk) {
			inblk = crypto_cursor_segment(&cc_in, &inlen);
		} else {
			crypto_cursor_advance(&cc_in, todo);
			inlen -= todo;
			inblk += todo;
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
		exf->update(ctx, blk, resid);
	}

	/*
	 * Length block: 64-bit big-endian bit counts of the AAD and
	 * the payload.
	 */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_aad_length * 8);
	blkp = (uint32_t *)blk + 3;
	*blkp = htobe32(crp->crp_payload_length * 8);
	exf->update(ctx, blk, blksz);

	/* Finalize MAC */
	exf->final(s.tag, ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag2);
		r = timingsafe_bcmp(s.tag, s.tag2, swa->sw_mlen);
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);

		inlen = 0;
		for (resid = crp->crp_payload_length; resid > blksz;
		     resid -= todo) {
			if (inlen == 0)
				inblk = crypto_cursor_segment(&cc_in, &inlen);
			if (outlen == 0)
				outblk = crypto_cursor_segment(&cc_out, &outlen);
			if (inlen < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
				inlen = blksz;
			}
			if (outlen < blksz) {
				outblk = blk;
				outlen = blksz;
			}

			todo = rounddown2(MIN(resid, MIN(inlen, outlen)),
			    blksz);

			exf->decrypt_multi(ctx, inblk, outblk, todo);

			if (inblk == blk) {
				inblk = crypto_cursor_segment(&cc_in, &inlen);
			} else {
				crypto_cursor_advance(&cc_in, todo);
				inlen -= todo;
				inblk += todo;
			}

			if (outblk == blk) {
				crypto_cursor_copyback(&cc_out, blksz, blk);
				outblk = crypto_cursor_segment(&cc_out,
				    &outlen);
			} else {
				crypto_cursor_advance(&cc_out, todo);
				outlen -= todo;
				outblk += todo;
			}
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag);
	}

out:
	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(&s, sizeof(s));

	return (error);
}

static void
build_ccm_b0(const char *nonce, u_int nonce_length, u_int aad_length,
    u_int data_length, u_int tag_length, uint8_t *b0)
{
	uint8_t *bp;
	uint8_t flags, L;

	KASSERT(nonce_length >= 7 && nonce_length <= 13,
	    ("nonce_length must be between 7 and 13 bytes"));

	/*
	 * Need to determine the L field value.  This is the number of
	 * bytes needed to specify the length of the message; the length
	 * is whatever is left in the 16 bytes after specifying flags and
	 * the nonce.
	 */
	L = 15 - nonce_length;

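	/*
	 * Flags byte (RFC 3610): bit 6 is Adata, bits 3-5 encode
	 * (tag_length - 2) / 2, and bits 0-2 encode L - 1.
	 */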
	flags = ((aad_length > 0) << 6) +
	    (((tag_length - 2) / 2) << 3) +
	    L - 1;

	/*
	 * Now we need to set up the first block, which has flags, nonce,
	 * and the message length.
	 */
	b0[0] = flags;
	memcpy(b0 + 1, nonce, nonce_length);
	bp = b0 + 1 + nonce_length;

	/* Need to copy L' [aka L-1] bytes of data_length */
	for (uint8_t *dst = b0 + CCM_CBC_BLOCK_LEN - 1; dst >= bp; dst--) {
		*dst = data_length;
		data_length >>= 8;
	}
}

/* NB: OCF only supports AAD lengths < 2^32. */
static int
build_ccm_aad_length(u_int aad_length, uint8_t *blk)
{
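	/*
	 * RFC 3610: AAD lengths below 0xff00 are encoded in two bytes;
	 * larger lengths are encoded as the marker 0xff 0xfe followed
	 * by a four-byte length.
	 */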
	if (aad_length < ((1 << 16) - (1 << 8))) {
		be16enc(blk, aad_length);
		return (sizeof(uint16_t));
	} else {
		blk[0] = 0xff;
		blk[1] = 0xfe;
		be32enc(blk + 2, aad_length);
		return (2 + sizeof(uint32_t));
	}
}

static int
swcr_ccm_cbc_mac(const struct swcr_session *ses, struct cryptop *crp)
{
	struct {
		union authctx ctx;
		u_char blk[CCM_CBC_BLOCK_LEN];
		u_char tag[AES_CBC_MAC_HASH_LEN];
		u_char tag2[AES_CBC_MAC_HASH_LEN];
	} s;
	const struct crypto_session_params *csp;
	const struct swcr_auth *swa;
	const struct auth_hash *axf;
	int error, ivlen, len;

	csp = crypto_get_params(crp->crp_session);
	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	if (crp->crp_auth_key != NULL) {
		axf->Init(&s.ctx);
		axf->Setkey(&s.ctx, crp->crp_auth_key, csp->csp_auth_klen);
	} else
		memcpy(&s.ctx, swa->sw_ictx, axf->ctxsize);

	/* Initialize the IV */
	ivlen = csp->csp_ivlen;

	/* Supply MAC with IV */
	axf->Reinit(&s.ctx, crp->crp_iv, ivlen);

	/* Supply MAC with b0. */
	build_ccm_b0(crp->crp_iv, ivlen, crp->crp_payload_length, 0,
	    swa->sw_mlen, s.blk);
	axf->Update(&s.ctx, s.blk, CCM_CBC_BLOCK_LEN);

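	/*
	 * In MAC-only mode the payload is authenticated as AAD (note
	 * that b0 above was built with a zero message length).
	 */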
	len = build_ccm_aad_length(crp->crp_payload_length, s.blk);
	axf->Update(&s.ctx, s.blk, len);

	crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length,
	    axf->Update, &s.ctx);

	/* Finalize MAC */
	axf->Final(s.tag, &s.ctx);

	error = 0;
	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag2);
		if (timingsafe_bcmp(s.tag, s.tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag);
	}
	explicit_bzero(&s, sizeof(s));
	return (error);
}

static int
swcr_ccm(const struct swcr_session *ses, struct cryptop *crp)
{
	const struct crypto_session_params *csp;
	struct {
		uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
		u_char tag[AES_CBC_MAC_HASH_LEN];
		u_char tag2[AES_CBC_MAC_HASH_LEN];
	} s;
	u_char *blk = (u_char *)s.blkbuf;
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	size_t inlen, outlen, todo;
	const struct swcr_auth *swa;
	const struct swcr_encdec *swe;
	const struct enc_xform *exf;
	void *ctx;
	size_t len;
	int blksz, error, ivlen, r, resid;

	csp = crypto_get_params(crp->crp_session);
	swa = &ses->swcr_auth;
	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	blksz = AES_BLOCK_LEN;
	KASSERT(blksz == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if (crp->crp_payload_length > ccm_max_payload_length(csp))
		return (EMSGSIZE);

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	ivlen = csp->csp_ivlen;

	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL)
		exf->setkey(ctx, crp->crp_cipher_key,
		    crypto_get_params(crp->crp_session)->csp_cipher_klen);
	else
		memcpy(ctx, swe->sw_ctx, exf->ctxsize);
	exf->reinit(ctx, crp->crp_iv, ivlen);

	/* Supply MAC with b0. */
	_Static_assert(sizeof(s.blkbuf) >= CCM_CBC_BLOCK_LEN,
	    "blkbuf too small for b0");
	build_ccm_b0(crp->crp_iv, ivlen, crp->crp_aad_length,
	    crp->crp_payload_length, swa->sw_mlen, blk);
	exf->update(ctx, blk, CCM_CBC_BLOCK_LEN);

	/* Supply MAC with AAD */
	if (crp->crp_aad_length != 0) {
		len = build_ccm_aad_length(crp->crp_aad_length, blk);
		exf->update(ctx, blk, len);
		if (crp->crp_aad != NULL)
			exf->update(ctx, crp->crp_aad, crp->crp_aad_length);
		else
			crypto_apply(crp, crp->crp_aad_start,
			    crp->crp_aad_length, exf->update, ctx);

		/* Pad the AAD (including length field) to a full block. */
		len = (len + crp->crp_aad_length) % CCM_CBC_BLOCK_LEN;
		if (len != 0) {
			len = CCM_CBC_BLOCK_LEN - len;
			memset(blk, 0, CCM_CBC_BLOCK_LEN);
			exf->update(ctx, blk, len);
		}
	}

	/* Do encryption/decryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;

	inlen = outlen = 0;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= todo) {
		if (inlen == 0)
			inblk = crypto_cursor_segment(&cc_in, &inlen);
		if (outlen == 0)
			outblk = crypto_cursor_segment(&cc_out, &outlen);

		if (inlen < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
			inlen = blksz;
		}

		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			if (outlen < blksz) {
				outblk = blk;
				outlen = blksz;
			}

			todo = rounddown2(MIN(resid, MIN(inlen, outlen)),
			    blksz);

			exf->update(ctx, inblk, todo);
			exf->encrypt_multi(ctx, inblk, outblk, todo);

			if (outblk == blk) {
				crypto_cursor_copyback(&cc_out, blksz, blk);
				outblk = crypto_cursor_segment(&cc_out, &outlen);
			} else {
				crypto_cursor_advance(&cc_out, todo);
				outlen -= todo;
				outblk += todo;
			}
		} else {
			/*
			 * One of the problems with CCM+CBC is that
			 * the authentication is done on the
			 * unencrypted data.  As a result, we have to
			 * decrypt the data twice: once to generate
			 * the tag and a second time after the tag is
			 * verified.
			 */
			todo = blksz;
			exf->decrypt(ctx, inblk, blk);
			exf->update(ctx, blk, todo);
		}

		if (inblk == blk) {
			inblk = crypto_cursor_segment(&cc_in, &inlen);
		} else {
			crypto_cursor_advance(&cc_in, todo);
			inlen -= todo;
			inblk += todo;
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->update(ctx, blk, resid);
			exf->encrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		} else {
			exf->decrypt_last(ctx, blk, blk, resid);
			exf->update(ctx, blk, resid);
		}
	}

	/* Finalize MAC */
	exf->final(s.tag, ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag2);
		r = timingsafe_bcmp(s.tag, s.tag2, swa->sw_mlen);
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		exf->reinit(ctx, crp->crp_iv, ivlen);
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);

		inlen = 0;
		for (resid = crp->crp_payload_length; resid >= blksz;
		     resid -= todo) {
			if (inlen == 0)
				inblk = crypto_cursor_segment(&cc_in, &inlen);
			if (outlen == 0)
				outblk = crypto_cursor_segment(&cc_out,
				    &outlen);

			if (inlen < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
				inlen = blksz;
			}
			if (outlen < blksz) {
				outblk = blk;
				outlen = blksz;
			}

			todo = rounddown2(MIN(resid, MIN(inlen, outlen)),
			    blksz);

			exf->decrypt_multi(ctx, inblk, outblk, todo);

			if (inblk == blk) {
				inblk = crypto_cursor_segment(&cc_in, &inlen);
			} else {
				crypto_cursor_advance(&cc_in, todo);
				inlen -= todo;
				inblk += todo;
			}

			if (outblk == blk) {
				crypto_cursor_copyback(&cc_out, blksz, blk);
				outblk = crypto_cursor_segment(&cc_out,
				    &outlen);
			} else {
				crypto_cursor_advance(&cc_out, todo);
				outlen -= todo;
				outblk += todo;
			}
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag);
	}

out:
	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(&s, sizeof(s));
	return (error);
}

static int
swcr_chacha20_poly1305(const struct swcr_session *ses, struct cryptop *crp)
{
	const struct crypto_session_params *csp;
	struct {
		uint64_t blkbuf[howmany(CHACHA20_NATIVE_BLOCK_LEN, sizeof(uint64_t))];
		u_char tag[POLY1305_HASH_LEN];
		u_char tag2[POLY1305_HASH_LEN];
	} s;
	u_char *blk = (u_char *)s.blkbuf;
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	size_t inlen, outlen, todo;
	uint64_t *blkp;
	const struct swcr_auth *swa;
	const struct swcr_encdec *swe;
	const struct enc_xform *exf;
	void *ctx;
	int blksz, error, r, resid;

	swa = &ses->swcr_auth;
	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	blksz = exf->native_blocksize;
	KASSERT(blksz <= sizeof(s.blkbuf), ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	csp = crypto_get_params(crp->crp_session);

	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL)
		exf->setkey(ctx, crp->crp_cipher_key,
		    csp->csp_cipher_klen);
	else
		memcpy(ctx, swe->sw_ctx, exf->ctxsize);
	exf->reinit(ctx, crp->crp_iv, csp->csp_ivlen);

	/* Supply MAC with AAD */
	if (crp->crp_aad != NULL)
		exf->update(ctx, crp->crp_aad, crp->crp_aad_length);
	else
		crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
		    exf->update, ctx);
	if (crp->crp_aad_length % POLY1305_BLOCK_LEN != 0) {
		/* padding1 */
		memset(blk, 0, POLY1305_BLOCK_LEN);
		exf->update(ctx, blk, POLY1305_BLOCK_LEN -
		    crp->crp_aad_length % POLY1305_BLOCK_LEN);
	}

	/* Do encryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;

	inlen = outlen = 0;
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		for (resid = crp->crp_payload_length; resid >= blksz;
		     resid -= todo) {
			if (inlen == 0)
				inblk = crypto_cursor_segment(&cc_in, &inlen);
			if (outlen == 0)
				outblk = crypto_cursor_segment(&cc_out,
				    &outlen);

			if (inlen < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
				inlen = blksz;
			}

			if (outlen < blksz) {
				outblk = blk;
				outlen = blksz;
			}

			todo = rounddown2(MIN(resid, MIN(inlen, outlen)),
			    blksz);

			exf->encrypt_multi(ctx, inblk, outblk, todo);
			exf->update(ctx, outblk, todo);

			if (inblk == blk) {
				inblk = crypto_cursor_segment(&cc_in, &inlen);
			} else {
				crypto_cursor_advance(&cc_in, todo);
				inlen -= todo;
				inblk += todo;
			}

			if (outblk == blk) {
				crypto_cursor_copyback(&cc_out, blksz, blk);
				outblk = crypto_cursor_segment(&cc_out, &outlen);
			} else {
				crypto_cursor_advance(&cc_out, todo);
				outlen -= todo;
				outblk += todo;
			}
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->encrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
			exf->update(ctx, blk, resid);
		}
	} else
		crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length, exf->update, ctx);
	if (crp->crp_payload_length % POLY1305_BLOCK_LEN != 0) {
		/* padding2 */
		memset(blk, 0, POLY1305_BLOCK_LEN);
		exf->update(ctx, blk, POLY1305_BLOCK_LEN -
		    crp->crp_payload_length % POLY1305_BLOCK_LEN);
	}

	/* Lengths: 64-bit little-endian byte counts of the AAD and payload. */
	blkp = (uint64_t *)blk;
	blkp[0] = htole64(crp->crp_aad_length);
	blkp[1] = htole64(crp->crp_payload_length);
	exf->update(ctx, blk, sizeof(uint64_t) * 2);

	/* Finalize MAC */
	exf->final(s.tag, ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag2);
		r = timingsafe_bcmp(s.tag, s.tag2, swa->sw_mlen);
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);

		inlen = 0;
		for (resid = crp->crp_payload_length; resid > blksz;
		     resid -= todo) {
			if (inlen == 0)
				inblk = crypto_cursor_segment(&cc_in, &inlen);
			if (outlen == 0)
				outblk = crypto_cursor_segment(&cc_out,
				    &outlen);
			if (inlen < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
				inlen = blksz;
			}
			if (outlen < blksz) {
				outblk = blk;
				outlen = blksz;
			}

			todo = rounddown2(MIN(resid, MIN(inlen, outlen)),
			    blksz);

			exf->decrypt_multi(ctx, inblk, outblk, todo);

			if (inblk == blk) {
				inblk = crypto_cursor_segment(&cc_in, &inlen);
			} else {
				crypto_cursor_advance(&cc_in, todo);
				inlen -= todo;
				inblk += todo;
			}

			if (outblk == blk) {
				crypto_cursor_copyback(&cc_out, blksz, blk);
				outblk = crypto_cursor_segment(&cc_out,
				    &outlen);
			} else {
				crypto_cursor_advance(&cc_out, todo);
				outlen -= todo;
				outblk += todo;
			}
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag);
	}

out:
	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(&s, sizeof(s));
	return (error);
}

/*
 * Apply a cipher and a digest to perform EtA.
 */
static int
swcr_eta(const struct swcr_session *ses, struct cryptop *crp)
{
	int error;

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		error = swcr_encdec(ses, crp);
		if (error == 0)
			error = swcr_authcompute(ses, crp);
	} else {
		error = swcr_authcompute(ses, crp);
		if (error == 0)
			error = swcr_encdec(ses, crp);
	}
	return (error);
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(const struct swcr_session *ses, struct cryptop *crp)
{
	const struct comp_algo *cxf;
	uint8_t *data, *out;
	int adj;
	uint32_t result;

	cxf = ses->swcr_compdec.sw_cxf;

	/*
	 * We must handle the whole buffer of data in one pass, so if
	 * the data is not contiguous in the mbuf it must be copied
	 * into a local buffer.
	 */

	data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
	    data);

	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
		result = cxf->compress(data, crp->crp_payload_length, &out);
	else
		result = cxf->decompress(data, crp->crp_payload_length, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return (EINVAL);
	crp->crp_olen = result;

	/* Check the compressed size when doing compression */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
		if (result >= crp->crp_payload_length) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return (0);
		}
	}

	/*
	 * Copy back the (de)compressed data.  m_copyback will extend
	 * the mbuf as necessary.
	 */
	crypto_copyback(crp, crp->crp_payload_start, result, out);
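	/* If the data shrank, trim the excess off the end of the buffer. */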
	if (result < crp->crp_payload_length) {
		switch (crp->crp_buf.cb_type) {
		case CRYPTO_BUF_MBUF:
		case CRYPTO_BUF_SINGLE_MBUF:
			adj = result - crp->crp_payload_length;
			m_adj(crp->crp_buf.cb_mbuf, adj);
			break;
		case CRYPTO_BUF_UIO: {
			struct uio *uio = crp->crp_buf.cb_uio;
			int ind;

			adj = crp->crp_payload_length - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
			}
			break;
		case CRYPTO_BUF_VMPAGE:
			adj = crp->crp_payload_length - result;
			crp->crp_buf.cb_vm_page_len -= adj;
			break;
		default:
			break;
		}
	}
	free(out, M_CRYPTO_DATA);
	return (0);
}

static int
swcr_setup_cipher(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	const struct enc_xform *txf;
	int error;

	swe = &ses->swcr_encdec;
	txf = crypto_cipher(csp);
	if (csp->csp_cipher_key != NULL) {
		if (txf->ctxsize != 0) {
			swe->sw_ctx = malloc(txf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swe->sw_ctx == NULL)
				return (ENOMEM);
		}
		error = txf->setkey(swe->sw_ctx,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;
	return (0);
}

static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	const struct auth_hash *axf;

	swa = &ses->swcr_auth;

	axf = crypto_auth_hash(csp);
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
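	/*
	 * Pre-allocate an initial context only when the key (or the
	 * absence of one) is known at session setup; otherwise the
	 * per-request key is scheduled in the process handler.
	 */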
	if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL) {
		swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_ictx == NULL)
			return (ENOBUFS);
	}

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		swa->sw_hmac = true;
		if (csp->csp_auth_key != NULL) {
			swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swa->sw_octx == NULL)
				return (ENOBUFS);
			hmac_init_ipad(axf, csp->csp_auth_key,
			    csp->csp_auth_klen, swa->sw_ictx);
			hmac_init_opad(axf, csp->csp_auth_key,
			    csp->csp_auth_klen, swa->sw_octx);
		}
		break;
	case CRYPTO_RIPEMD160:
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
	case CRYPTO_NULL_HMAC:
		axf->Init(swa->sw_ictx);
		break;
	case CRYPTO_AES_NIST_GMAC:
	case CRYPTO_AES_CCM_CBC_MAC:
	case CRYPTO_POLY1305:
		if (csp->csp_auth_key != NULL) {
			axf->Init(swa->sw_ictx);
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}
		break;
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		/*
		 * Blake2b and Blake2s support an optional key but do
		 * not require one.
		 */
		if (csp->csp_auth_klen == 0)
			axf->Init(swa->sw_ictx);
		else if (csp->csp_auth_key != NULL)
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		break;
	}

	if (csp->csp_mode == CSP_MODE_DIGEST) {
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
			ses->swcr_process = swcr_gmac;
			break;
		case CRYPTO_AES_CCM_CBC_MAC:
			ses->swcr_process = swcr_ccm_cbc_mac;
			break;
		default:
			ses->swcr_process = swcr_authcompute;
		}
	}

	return (0);
}

static int
swcr_setup_aead(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	int error;

	error = swcr_setup_cipher(ses, csp);
	if (error)
		return (error);

	swa = &ses->swcr_auth;
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = ses->swcr_encdec.sw_exf->macsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	return (0);
}

static bool
swcr_auth_supported(const struct crypto_session_params *csp)
{
	const struct auth_hash *axf;

	axf = crypto_auth_hash(csp);
	if (axf == NULL)
		return (false);
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		break;
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_GCM_IV_LEN)
			return (false);
		break;
	case CRYPTO_POLY1305:
		if (csp->csp_auth_klen != POLY1305_KEY_LEN)
			return (false);
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		break;
	}
	return (true);
}

static bool
swcr_cipher_supported(const struct crypto_session_params *csp)
{
	const struct enc_xform *txf;

	txf = crypto_cipher(csp);
	if (txf == NULL)
		return (false);
	if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
	    txf->ivsize != csp->csp_ivlen)
		return (false);
	return (true);
}

#define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)

static int
swcr_probesession(device_t dev, const struct crypto_session_params *csp)
{
	if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
		case CRYPTO_XCHACHA20_POLY1305:
			return (EINVAL);
		default:
			if (!swcr_cipher_supported(csp))
				return (EINVAL);
			break;
		}
		break;
	case CSP_MODE_DIGEST:
		if (!swcr_auth_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			switch (csp->csp_cipher_klen * 8) {
			case 128:
			case 192:
			case 256:
				break;
			default:
				return (EINVAL);
			}
			break;
		case CRYPTO_CHACHA20_POLY1305:
		case CRYPTO_XCHACHA20_POLY1305:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		/* AEAD algorithms cannot be used for EtA. */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
		case CRYPTO_XCHACHA20_POLY1305:
			return (EINVAL);
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			return (EINVAL);
		}

		if (!swcr_cipher_supported(csp) ||
		    !swcr_auth_supported(csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_SOFTWARE);
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct swcr_session *ses;
	const struct comp_algo *cxf;
	int error;

	ses = crypto_get_driver_session(cses);

	error = 0;
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			break;
#ifdef INVARIANTS
		default:
			panic("bad compression algo");
#endif
		}
		ses->swcr_compdec.sw_cxf = cxf;
		ses->swcr_process = swcr_compdec;
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_NULL_CBC:
			ses->swcr_process = swcr_null;
			break;
#ifdef INVARIANTS
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
		case CRYPTO_XCHACHA20_POLY1305:
			panic("bad cipher algo");
#endif
		default:
			error = swcr_setup_cipher(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_encdec;
		}
		break;
	case CSP_MODE_DIGEST:
		error = swcr_setup_auth(ses, csp);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			error = swcr_setup_aead(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_gcm;
			break;
		case CRYPTO_AES_CCM_16:
			error = swcr_setup_aead(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_ccm;
			break;
		case CRYPTO_CHACHA20_POLY1305:
		case CRYPTO_XCHACHA20_POLY1305:
			error = swcr_setup_aead(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_chacha20_poly1305;
			break;
#ifdef INVARIANTS
		default:
			panic("bad aead algo");
#endif
		}
		break;
	case CSP_MODE_ETA:
#ifdef INVARIANTS
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
		case CRYPTO_XCHACHA20_POLY1305:
			panic("bad eta cipher algo");
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			panic("bad eta auth algo");
		}
#endif

		error = swcr_setup_auth(ses, csp);
		if (error)
			break;
		if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
			/* Effectively degrade to digest mode. */
			ses->swcr_process = swcr_authcompute;
			break;
		}

		error = swcr_setup_cipher(ses, csp);
		if (error == 0)
			ses->swcr_process = swcr_eta;
		break;
	default:
		error = EINVAL;
	}

	if (error)
		swcr_freesession(dev, cses);
	return (error);
}

static void
swcr_freesession(device_t dev, crypto_session_t cses)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(cses);

	zfree(ses->swcr_encdec.sw_ctx, M_CRYPTO_DATA);
	zfree(ses->swcr_auth.sw_ictx, M_CRYPTO_DATA);
	zfree(ses->swcr_auth.sw_octx, M_CRYPTO_DATA);
}

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(crp->crp_session);

	crp->crp_etype = ses->swcr_process(ses, crp);

	crypto_done(crp);
	return (0);
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	device_quiet(dev);
	return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{

	swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!");
		return (ENXIO);
	}

	return (0);
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	return (0);
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_probesession, swcr_probesession),
	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession,swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, crypto_modevent, NULL);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);