/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014-2021 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Portions of this software were developed by Ararat River
 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/endian.h>
#include <sys/limits.h>

#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

struct swcr_auth {
	void		*sw_ictx;
	void		*sw_octx;
	const struct auth_hash *sw_axf;
	uint16_t	sw_mlen;
	bool		sw_hmac;
};

struct swcr_encdec {
	void		*sw_ctx;
	const struct enc_xform *sw_exf;
};

struct swcr_compdec {
	const struct comp_algo *sw_cxf;
};

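/*
 * Each session caches a pointer to the routine implementing its mode;
 * the driver's process method dispatches through it without
 * re-examining the session parameters on every request.
 */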
struct swcr_session {
	int	(*swcr_process)(const struct swcr_session *, struct cryptop *);

	struct swcr_auth swcr_auth;
	struct swcr_encdec swcr_encdec;
	struct swcr_compdec swcr_compdec;
};

static	int32_t swcr_id;

static	void swcr_freesession(device_t dev, crypto_session_t cses);

/* Used for CRYPTO_NULL_CBC. */
static int
swcr_null(const struct swcr_session *ses, struct cryptop *crp)
{

	return (0);
}

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(const struct swcr_session *ses, struct cryptop *crp)
{
	unsigned char blk[EALG_MAX_BLOCK_LEN];
	const struct crypto_session_params *csp;
	const struct enc_xform *exf;
	const struct swcr_encdec *sw;
	void *ctx;
	size_t inlen, outlen, todo;
	int blksz, resid;
	struct crypto_buffer_cursor cc_in, cc_out;
	const unsigned char *inblk;
	unsigned char *outblk;
	int error;
	bool encrypting;

	error = 0;

	sw = &ses->swcr_encdec;
	exf = sw->sw_exf;
	csp = crypto_get_params(crp->crp_session);

	if (exf->native_blocksize == 0) {
		/* Check for non-padded data */
		if ((crp->crp_payload_length % exf->blocksize) != 0)
			return (EINVAL);

		blksz = exf->blocksize;
	} else
		blksz = exf->native_blocksize;

	if (exf == &enc_xform_aes_icm &&
	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

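	/*
	 * Run the cipher with a throwaway context on the stack: schedule
	 * a per-request key if the caller supplied one, otherwise reuse
	 * the context keyed when the session was created.
	 */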
	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL) {
		error = exf->setkey(ctx, crp->crp_cipher_key,
		    csp->csp_cipher_klen);
		if (error)
			return (error);
	} else
		memcpy(ctx, sw->sw_ctx, exf->ctxsize);

	crypto_read_iv(crp, blk);
	exf->reinit(ctx, blk, csp->csp_ivlen);

	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;

	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);

	/*
	 * Loop through the blocks.  'inlen' is the remaining length of
	 * the current segment in the input buffer.  'outlen' is the
	 * remaining length of the current segment in the output buffer.
	 */
	inlen = outlen = 0;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= todo) {
		if (inlen == 0)
			inblk = crypto_cursor_segment(&cc_in, &inlen);
		if (outlen == 0)
			outblk = crypto_cursor_segment(&cc_out, &outlen);

		/*
		 * If the current block is not contained within the
		 * current input/output segment, use 'blk' as a local
		 * buffer.
		 */
		if (inlen < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
			inlen = blksz;
		}
		if (outlen < blksz) {
			outblk = blk;
			outlen = blksz;
		}

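		/*
		 * Process the longest run of whole blocks that fits in
		 * both the current input and output segments.
		 */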
		todo = rounddown2(MIN(resid, MIN(inlen, outlen)), blksz);

		if (encrypting)
			exf->encrypt_multi(ctx, inblk, outblk, todo);
		else
			exf->decrypt_multi(ctx, inblk, outblk, todo);

		if (inblk == blk) {
			inblk = crypto_cursor_segment(&cc_in, &inlen);
		} else {
			crypto_cursor_advance(&cc_in, todo);
			inlen -= todo;
			inblk += todo;
		}

		if (outblk == blk) {
			crypto_cursor_copyback(&cc_out, blksz, blk);
			outblk = crypto_cursor_segment(&cc_out, &outlen);
		} else {
			crypto_cursor_advance(&cc_out, todo);
			outlen -= todo;
			outblk += todo;
		}
	}

	/* Handle trailing partial block for stream ciphers. */
	if (resid > 0) {
		KASSERT(exf->native_blocksize != 0,
		    ("%s: partial block of %d bytes for cipher %s",
		    __func__, resid, exf->name));
		KASSERT(resid < blksz, ("%s: partial block too big", __func__));

		inblk = crypto_cursor_segment(&cc_in, &inlen);
		outblk = crypto_cursor_segment(&cc_out, &outlen);
		if (inlen < resid) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			inblk = blk;
		}
		if (outlen < resid)
			outblk = blk;
		if (encrypting)
			exf->encrypt_last(ctx, inblk, outblk,
			    resid);
		else
			exf->decrypt_last(ctx, inblk, outblk,
			    resid);
		if (outlen < resid)
			crypto_cursor_copyback(&cc_out, resid, blk);
	}

	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(blk, sizeof(blk));
	return (0);
}

/*
 * Compute or verify hash.
 */
static int
swcr_authcompute(const struct swcr_session *ses, struct cryptop *crp)
{
	struct {
		union authctx ctx;
		u_char aalg[HASH_MAX_LEN];
		u_char uaalg[HASH_MAX_LEN];
	} s;
	const struct crypto_session_params *csp;
	const struct swcr_auth *sw;
	const struct auth_hash *axf;
	int err;

	sw = &ses->swcr_auth;

	axf = sw->sw_axf;

	csp = crypto_get_params(crp->crp_session);
	if (crp->crp_auth_key != NULL) {
		if (sw->sw_hmac) {
			hmac_init_ipad(axf, crp->crp_auth_key,
			    csp->csp_auth_klen, &s.ctx);
		} else {
			axf->Init(&s.ctx);
			axf->Setkey(&s.ctx, crp->crp_auth_key,
			    csp->csp_auth_klen);
		}
	} else
		memcpy(&s.ctx, sw->sw_ictx, axf->ctxsize);

	if (crp->crp_aad != NULL)
		err = axf->Update(&s.ctx, crp->crp_aad, crp->crp_aad_length);
	else
		err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
		    axf->Update, &s.ctx);
	if (err)
		goto out;

	if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
	    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		err = crypto_apply_buf(&crp->crp_obuf,
		    crp->crp_payload_output_start, crp->crp_payload_length,
		    axf->Update, &s.ctx);
	else
		err = crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length, axf->Update, &s.ctx);
	if (err)
		goto out;

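	/*
	 * For IPsec extended sequence numbers, fold the high-order
	 * 32 bits of the sequence number into the digest.
	 */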
	if (csp->csp_flags & CSP_F_ESN)
		axf->Update(&s.ctx, crp->crp_esn, 4);

	axf->Final(s.aalg, &s.ctx);
	if (sw->sw_hmac) {
		if (crp->crp_auth_key != NULL)
			hmac_init_opad(axf, crp->crp_auth_key,
			    csp->csp_auth_klen, &s.ctx);
		else
			memcpy(&s.ctx, sw->sw_octx, axf->ctxsize);
		axf->Update(&s.ctx, s.aalg, axf->hashsize);
		axf->Final(s.aalg, &s.ctx);
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, s.uaalg);
		if (timingsafe_bcmp(s.aalg, s.uaalg, sw->sw_mlen) != 0)
			err = EBADMSG;
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, s.aalg);
	}
out:
	explicit_bzero(&s, sizeof(s));
	return (err);
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */

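/*
 * Compute or verify a GMAC tag: GHASH the payload in 16-byte blocks,
 * zero-padding the final partial block, then absorb a block encoding
 * the payload length in bits before finalizing.
 */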
static int
swcr_gmac(const struct swcr_session *ses, struct cryptop *crp)
{
	struct {
		union authctx ctx;
		uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
		u_char tag[GMAC_DIGEST_LEN];
		u_char tag2[GMAC_DIGEST_LEN];
	} s;
	u_char *blk = (u_char *)s.blkbuf;
	struct crypto_buffer_cursor cc;
	const u_char *inblk;
	const struct swcr_auth *swa;
	const struct auth_hash *axf;
	uint32_t *blkp;
	size_t len;
	int blksz, error, ivlen, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;
	blksz = GMAC_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	if (crp->crp_auth_key != NULL) {
		axf->Init(&s.ctx);
		axf->Setkey(&s.ctx, crp->crp_auth_key,
		    crypto_get_params(crp->crp_session)->csp_auth_klen);
	} else
		memcpy(&s.ctx, swa->sw_ictx, axf->ctxsize);

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	crypto_read_iv(crp, blk);

	axf->Reinit(&s.ctx, blk, ivlen);
	crypto_cursor_init(&cc, &crp->crp_buf);
	crypto_cursor_advance(&cc, crp->crp_payload_start);
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= len) {
		inblk = crypto_cursor_segment(&cc, &len);
		if (len >= blksz) {
			len = rounddown(MIN(len, resid), blksz);
			crypto_cursor_advance(&cc, len);
		} else {
			len = blksz;
			crypto_cursor_copydata(&cc, len, blk);
			inblk = blk;
		}
		axf->Update(&s.ctx, inblk, len);
	}
	if (resid > 0) {
		memset(blk, 0, blksz);
		crypto_cursor_copydata(&cc, resid, blk);
		axf->Update(&s.ctx, blk, blksz);
	}

	/* length block */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&s.ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(s.tag, &s.ctx);

	error = 0;
	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag2);
		if (timingsafe_bcmp(s.tag, s.tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, s.tag);
	}
	explicit_bzero(&s, sizeof(s));
	return (error);
}

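/*
 * AES-GCM AEAD: GHASH the AAD, then encrypt-and-hash (or, for
 * decryption, hash the ciphertext), absorb the AAD/payload length
 * block, and emit or verify the tag.  Plaintext is only written back
 * once the tag has verified.
 */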
static int
swcr_gcm(const struct swcr_session *ses, struct cryptop *crp)
{
	struct {
		uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
		u_char tag[GMAC_DIGEST_LEN];
		u_char tag2[GMAC_DIGEST_LEN];
	} s;
	u_char *blk = (u_char *)s.blkbuf;
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	size_t inlen, outlen, todo;
	const struct swcr_auth *swa;
	const struct swcr_encdec *swe;
	const struct enc_xform *exf;
	void *ctx;
	uint32_t *blkp;
	int blksz, error, ivlen, r, resid;

	swa = &ses->swcr_auth;
	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	blksz = GMAC_BLOCK_LEN;
	KASSERT(blksz == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	ivlen = AES_GCM_IV_LEN;

	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL)
		exf->setkey(ctx, crp->crp_cipher_key,
		    crypto_get_params(crp->crp_session)->csp_cipher_klen);
	else
		memcpy(ctx, swe->sw_ctx, exf->ctxsize);
	exf->reinit(ctx, crp->crp_iv, ivlen);

	/* Supply MAC with AAD */
	if (crp->crp_aad != NULL) {
		inlen = rounddown2(crp->crp_aad_length, blksz);
		if (inlen != 0)
			exf->update(ctx, crp->crp_aad, inlen);
		if (crp->crp_aad_length != inlen) {
			memset(blk, 0, blksz);
			memcpy(blk, (char *)crp->crp_aad + inlen,
			    crp->crp_aad_length - inlen);
			exf->update(ctx, blk, blksz);
		}
	} else {
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_aad_start);
		for (resid = crp->crp_aad_length; resid >= blksz;
		     resid -= inlen) {
			inblk = crypto_cursor_segment(&cc_in, &inlen);
			if (inlen >= blksz) {
				inlen = rounddown2(MIN(inlen, resid), blksz);
				crypto_cursor_advance(&cc_in, inlen);
			} else {
				inlen = blksz;
				crypto_cursor_copydata(&cc_in, inlen, blk);
				inblk = blk;
			}
			exf->update(ctx, inblk, inlen);
		}
		if (resid > 0) {
			memset(blk, 0, blksz);
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->update(ctx, blk, blksz);
		}
	}

	/* Do encryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;

	inlen = outlen = 0;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= todo) {
		if (inlen == 0)
			inblk = crypto_cursor_segment(&cc_in, &inlen);
		if (outlen == 0)
			outblk = crypto_cursor_segment(&cc_out, &outlen);

		if (inlen < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
			inlen = blksz;
		}

		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			if (outlen < blksz) {
				outblk = blk;
				outlen = blksz;
			}

			todo = rounddown2(MIN(resid, MIN(inlen, outlen)),
			    blksz);

			exf->encrypt_multi(ctx, inblk, outblk, todo);
			exf->update(ctx, outblk, todo);

			if (outblk == blk) {
				crypto_cursor_copyback(&cc_out, blksz, blk);
				outblk = crypto_cursor_segment(&cc_out, &outlen);
			} else {
				crypto_cursor_advance(&cc_out, todo);
				outlen -= todo;
				outblk += todo;
			}
		} else {
			todo = rounddown2(MIN(resid, inlen), blksz);
			exf->update(ctx, inblk, todo);
		}

		if (inblk == blk) {
			inblk = crypto_cursor_segment(&cc_in, &inlen);
		} else {
			crypto_cursor_advance(&cc_in, todo);
			inlen -= todo;
			inblk += todo;
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
		exf->update(ctx, blk, resid);
	}

	/* length block */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_aad_length * 8);
	blkp = (uint32_t *)blk + 3;
	*blkp = htobe32(crp->crp_payload_length * 8);
	exf->update(ctx, blk, blksz);

	/* Finalize MAC */
	exf->final(s.tag, ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag2);
		r = timingsafe_bcmp(s.tag, s.tag2, swa->sw_mlen);
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);

		inlen = 0;
		for (resid = crp->crp_payload_length; resid > blksz;
		     resid -= todo) {
			if (inlen == 0)
				inblk = crypto_cursor_segment(&cc_in, &inlen);
			if (outlen == 0)
				outblk = crypto_cursor_segment(&cc_out, &outlen);
			if (inlen < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
				inlen = blksz;
			}
			if (outlen < blksz) {
				outblk = blk;
				outlen = blksz;
			}

			todo = rounddown2(MIN(resid, MIN(inlen, outlen)),
			    blksz);

			exf->decrypt_multi(ctx, inblk, outblk, todo);

			if (inblk == blk) {
				inblk = crypto_cursor_segment(&cc_in, &inlen);
			} else {
				crypto_cursor_advance(&cc_in, todo);
				inlen -= todo;
				inblk += todo;
			}

			if (outblk == blk) {
				crypto_cursor_copyback(&cc_out, blksz, blk);
				outblk = crypto_cursor_segment(&cc_out,
				    &outlen);
			} else {
				crypto_cursor_advance(&cc_out, todo);
				outlen -= todo;
				outblk += todo;
			}
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag);
	}

out:
	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(&s, sizeof(s));

	return (error);
}

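/*
 * Construct the CCM B0 block (RFC 3610, section 2.2):
 *
 *	octet 0:		flags (Adata, (M - 2) / 2, L - 1)
 *	octets 1 .. 15 - L:	nonce
 *	octets 16 - L .. 15:	message length, big-endian
 */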
static void
build_ccm_b0(const char *nonce, u_int nonce_length, u_int aad_length,
    u_int data_length, u_int tag_length, uint8_t *b0)
{
	uint8_t *bp;
	uint8_t flags, L;

	KASSERT(nonce_length >= 7 && nonce_length <= 13,
	    ("nonce_length must be between 7 and 13 bytes"));

	/*
	 * Need to determine the L field value.  This is the number of
	 * bytes needed to specify the length of the message; the length
	 * is whatever is left in the 16 bytes after specifying flags and
	 * the nonce.
	 */
	L = 15 - nonce_length;

	flags = ((aad_length > 0) << 6) +
	    (((tag_length - 2) / 2) << 3) +
	    L - 1;

	/*
	 * Now we need to set up the first block, which has flags, nonce,
	 * and the message length.
	 */
	b0[0] = flags;
	memcpy(b0 + 1, nonce, nonce_length);
	bp = b0 + 1 + nonce_length;

	/* Copy the L bytes of data_length in big-endian order. */
	for (uint8_t *dst = b0 + CCM_CBC_BLOCK_LEN - 1; dst >= bp; dst--) {
		*dst = data_length;
		data_length >>= 8;
	}
}

/* NB: OCF only supports AAD lengths < 2^32. */
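/*
 * Encode the AAD length prefix from RFC 3610: two big-endian bytes for
 * lengths below 0xff00, otherwise the escape bytes 0xff 0xfe followed
 * by a 32-bit big-endian length.
 */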
static int
build_ccm_aad_length(u_int aad_length, uint8_t *blk)
{
	if (aad_length < ((1 << 16) - (1 << 8))) {
		be16enc(blk, aad_length);
		return (sizeof(uint16_t));
	} else {
		blk[0] = 0xff;
		blk[1] = 0xfe;
		be32enc(blk + 2, aad_length);
		return (2 + sizeof(uint32_t));
	}
}

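/*
 * Compute or verify a CCM-style CBC-MAC where the payload is
 * authenticated as associated data (there is no encrypted payload):
 * absorb B0 and the encoded payload length, then MAC the payload.
 */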
static int
swcr_ccm_cbc_mac(const struct swcr_session *ses, struct cryptop *crp)
{
	struct {
		union authctx ctx;
		u_char blk[CCM_CBC_BLOCK_LEN];
		u_char tag[AES_CBC_MAC_HASH_LEN];
		u_char tag2[AES_CBC_MAC_HASH_LEN];
	} s;
	const struct crypto_session_params *csp;
	const struct swcr_auth *swa;
	const struct auth_hash *axf;
	int error, ivlen, len;

	csp = crypto_get_params(crp->crp_session);
	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	if (crp->crp_auth_key != NULL) {
		axf->Init(&s.ctx);
		axf->Setkey(&s.ctx, crp->crp_auth_key, csp->csp_auth_klen);
	} else
		memcpy(&s.ctx, swa->sw_ictx, axf->ctxsize);

	/* Initialize the IV */
	ivlen = csp->csp_ivlen;

	/* Supply MAC with IV */
	axf->Reinit(&s.ctx, crp->crp_iv, ivlen);

	/* Supply MAC with b0. */
	build_ccm_b0(crp->crp_iv, ivlen, crp->crp_payload_length, 0,
	    swa->sw_mlen, s.blk);
	axf->Update(&s.ctx, s.blk, CCM_CBC_BLOCK_LEN);

	len = build_ccm_aad_length(crp->crp_payload_length, s.blk);
	axf->Update(&s.ctx, s.blk, len);

	crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length,
	    axf->Update, &s.ctx);

	/* Finalize MAC */
	axf->Final(s.tag, &s.ctx);

	error = 0;
	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag2);
		if (timingsafe_bcmp(s.tag, s.tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag);
	}
	explicit_bzero(&s, sizeof(s));
	return (error);
}

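/*
 * AES-CCM AEAD: MAC-then-encrypt on the way in, and decrypt-then-MAC
 * on the way out.  Because CBC-MAC covers the plaintext, decryption
 * runs twice: once to recompute the tag and again, after the tag
 * verifies, to write the plaintext out.
 */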
static int
swcr_ccm(const struct swcr_session *ses, struct cryptop *crp)
{
	const struct crypto_session_params *csp;
	struct {
		uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
		u_char tag[AES_CBC_MAC_HASH_LEN];
		u_char tag2[AES_CBC_MAC_HASH_LEN];
	} s;
	u_char *blk = (u_char *)s.blkbuf;
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	size_t inlen, outlen, todo;
	const struct swcr_auth *swa;
	const struct swcr_encdec *swe;
	const struct enc_xform *exf;
	void *ctx;
	size_t len;
	int blksz, error, ivlen, r, resid;

	csp = crypto_get_params(crp->crp_session);
	swa = &ses->swcr_auth;
	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	blksz = AES_BLOCK_LEN;
	KASSERT(blksz == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if (crp->crp_payload_length > ccm_max_payload_length(csp))
		return (EMSGSIZE);

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	ivlen = csp->csp_ivlen;

	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL)
		exf->setkey(ctx, crp->crp_cipher_key,
		    crypto_get_params(crp->crp_session)->csp_cipher_klen);
	else
		memcpy(ctx, swe->sw_ctx, exf->ctxsize);
	exf->reinit(ctx, crp->crp_iv, ivlen);

	/* Supply MAC with b0. */
	_Static_assert(sizeof(s.blkbuf) >= CCM_CBC_BLOCK_LEN,
	    "blkbuf too small for b0");
	build_ccm_b0(crp->crp_iv, ivlen, crp->crp_aad_length,
	    crp->crp_payload_length, swa->sw_mlen, blk);
	exf->update(ctx, blk, CCM_CBC_BLOCK_LEN);

	/* Supply MAC with AAD */
	if (crp->crp_aad_length != 0) {
		len = build_ccm_aad_length(crp->crp_aad_length, blk);
		exf->update(ctx, blk, len);
		if (crp->crp_aad != NULL)
			exf->update(ctx, crp->crp_aad, crp->crp_aad_length);
		else
			crypto_apply(crp, crp->crp_aad_start,
			    crp->crp_aad_length, exf->update, ctx);

		/* Pad the AAD (including length field) to a full block. */
		len = (len + crp->crp_aad_length) % CCM_CBC_BLOCK_LEN;
		if (len != 0) {
			len = CCM_CBC_BLOCK_LEN - len;
			memset(blk, 0, CCM_CBC_BLOCK_LEN);
			exf->update(ctx, blk, len);
		}
	}

	/* Do encryption/decryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;

	inlen = outlen = 0;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= todo) {
		if (inlen == 0)
			inblk = crypto_cursor_segment(&cc_in, &inlen);
		if (outlen == 0)
			outblk = crypto_cursor_segment(&cc_out, &outlen);

		if (inlen < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
			inlen = blksz;
		}

		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			if (outlen < blksz) {
				outblk = blk;
				outlen = blksz;
			}

			todo = rounddown2(MIN(resid, MIN(inlen, outlen)),
			    blksz);

			exf->update(ctx, inblk, todo);
			exf->encrypt_multi(ctx, inblk, outblk, todo);

			if (outblk == blk) {
				crypto_cursor_copyback(&cc_out, blksz, blk);
				outblk = crypto_cursor_segment(&cc_out, &outlen);
			} else {
				crypto_cursor_advance(&cc_out, todo);
				outlen -= todo;
				outblk += todo;
			}
		} else {
			/*
			 * One of the problems with CCM+CBC is that
			 * the authentication is done on the
			 * unencrypted data.  As a result, we have to
			 * decrypt the data twice: once to generate
			 * the tag and a second time after the tag is
			 * verified.
			 */
			todo = blksz;
			exf->decrypt(ctx, inblk, blk);
			exf->update(ctx, blk, todo);
		}

		if (inblk == blk) {
			inblk = crypto_cursor_segment(&cc_in, &inlen);
		} else {
			crypto_cursor_advance(&cc_in, todo);
			inlen -= todo;
			inblk += todo;
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->update(ctx, blk, resid);
			exf->encrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		} else {
			exf->decrypt_last(ctx, blk, blk, resid);
			exf->update(ctx, blk, resid);
		}
	}

	/* Finalize MAC */
	exf->final(s.tag, ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag2);
		r = timingsafe_bcmp(s.tag, s.tag2, swa->sw_mlen);
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		exf->reinit(ctx, crp->crp_iv, ivlen);
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);

		inlen = 0;
		for (resid = crp->crp_payload_length; resid >= blksz;
		     resid -= todo) {
			if (inlen == 0)
				inblk = crypto_cursor_segment(&cc_in, &inlen);
			if (outlen == 0)
				outblk = crypto_cursor_segment(&cc_out,
				    &outlen);

			if (inlen < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
				inlen = blksz;
			}
			if (outlen < blksz) {
				outblk = blk;
				outlen = blksz;
			}

			todo = rounddown2(MIN(resid, MIN(inlen, outlen)),
			    blksz);

			exf->decrypt_multi(ctx, inblk, outblk, todo);

			if (inblk == blk) {
				inblk = crypto_cursor_segment(&cc_in, &inlen);
			} else {
				crypto_cursor_advance(&cc_in, todo);
				inlen -= todo;
				inblk += todo;
			}

			if (outblk == blk) {
				crypto_cursor_copyback(&cc_out, blksz, blk);
				outblk = crypto_cursor_segment(&cc_out,
				    &outlen);
			} else {
				crypto_cursor_advance(&cc_out, todo);
				outlen -= todo;
				outblk += todo;
			}
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag);
	}

out:
	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(&s, sizeof(s));
	return (error);
}

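/*
 * ChaCha20-Poly1305 AEAD (RFC 8439 construction): MAC the AAD and the
 * ciphertext, zero-padding each to a 16-byte boundary, then a final
 * block holding the two lengths as 64-bit little-endian values.
 */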
static int
swcr_chacha20_poly1305(const struct swcr_session *ses, struct cryptop *crp)
{
	const struct crypto_session_params *csp;
	struct {
		uint64_t blkbuf[howmany(CHACHA20_NATIVE_BLOCK_LEN, sizeof(uint64_t))];
		u_char tag[POLY1305_HASH_LEN];
		u_char tag2[POLY1305_HASH_LEN];
	} s;
	u_char *blk = (u_char *)s.blkbuf;
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	size_t inlen, outlen, todo;
	uint64_t *blkp;
	const struct swcr_auth *swa;
	const struct swcr_encdec *swe;
	const struct enc_xform *exf;
	void *ctx;
	int blksz, error, r, resid;

	swa = &ses->swcr_auth;
	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	blksz = exf->native_blocksize;
	KASSERT(blksz <= sizeof(s.blkbuf), ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	csp = crypto_get_params(crp->crp_session);

	ctx = __builtin_alloca(exf->ctxsize);
	if (crp->crp_cipher_key != NULL)
		exf->setkey(ctx, crp->crp_cipher_key,
		    csp->csp_cipher_klen);
	else
		memcpy(ctx, swe->sw_ctx, exf->ctxsize);
	exf->reinit(ctx, crp->crp_iv, csp->csp_ivlen);

	/* Supply MAC with AAD */
	if (crp->crp_aad != NULL)
		exf->update(ctx, crp->crp_aad, crp->crp_aad_length);
	else
		crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
		    exf->update, ctx);
	if (crp->crp_aad_length % POLY1305_BLOCK_LEN != 0) {
		/* padding1 */
		memset(blk, 0, POLY1305_BLOCK_LEN);
		exf->update(ctx, blk, POLY1305_BLOCK_LEN -
		    crp->crp_aad_length % POLY1305_BLOCK_LEN);
	}

	/* Do encryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;

	inlen = outlen = 0;
	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		for (resid = crp->crp_payload_length; resid >= blksz;
		     resid -= todo) {
			if (inlen == 0)
				inblk = crypto_cursor_segment(&cc_in, &inlen);
			if (outlen == 0)
				outblk = crypto_cursor_segment(&cc_out,
				    &outlen);

			if (inlen < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
				inlen = blksz;
			}

			if (outlen < blksz) {
				outblk = blk;
				outlen = blksz;
			}

			todo = rounddown2(MIN(resid, MIN(inlen, outlen)),
			    blksz);

			exf->encrypt_multi(ctx, inblk, outblk, todo);
			exf->update(ctx, outblk, todo);

			if (inblk == blk) {
				inblk = crypto_cursor_segment(&cc_in, &inlen);
			} else {
				crypto_cursor_advance(&cc_in, todo);
				inlen -= todo;
				inblk += todo;
			}

			if (outblk == blk) {
				crypto_cursor_copyback(&cc_out, blksz, blk);
				outblk = crypto_cursor_segment(&cc_out, &outlen);
			} else {
				crypto_cursor_advance(&cc_out, todo);
				outlen -= todo;
				outblk += todo;
			}
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->encrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
			exf->update(ctx, blk, resid);
		}
	} else
		crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length, exf->update, ctx);
	if (crp->crp_payload_length % POLY1305_BLOCK_LEN != 0) {
		/* padding2 */
		memset(blk, 0, POLY1305_BLOCK_LEN);
		exf->update(ctx, blk, POLY1305_BLOCK_LEN -
		    crp->crp_payload_length % POLY1305_BLOCK_LEN);
	}

	/* lengths */
	blkp = (uint64_t *)blk;
	blkp[0] = htole64(crp->crp_aad_length);
	blkp[1] = htole64(crp->crp_payload_length);
	exf->update(ctx, blk, sizeof(uint64_t) * 2);

	/* Finalize MAC */
	exf->final(s.tag, ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag2);
		r = timingsafe_bcmp(s.tag, s.tag2, swa->sw_mlen);
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);

		inlen = 0;
		for (resid = crp->crp_payload_length; resid > blksz;
		     resid -= todo) {
			if (inlen == 0)
				inblk = crypto_cursor_segment(&cc_in, &inlen);
			if (outlen == 0)
				outblk = crypto_cursor_segment(&cc_out,
				    &outlen);
			if (inlen < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
				inlen = blksz;
			}
			if (outlen < blksz) {
				outblk = blk;
				outlen = blksz;
			}

			todo = rounddown2(MIN(resid, MIN(inlen, outlen)),
			    blksz);

			exf->decrypt_multi(ctx, inblk, outblk, todo);

			if (inblk == blk) {
				inblk = crypto_cursor_segment(&cc_in, &inlen);
			} else {
				crypto_cursor_advance(&cc_in, todo);
				inlen -= todo;
				inblk += todo;
			}

			if (outblk == blk) {
				crypto_cursor_copyback(&cc_out, blksz, blk);
				outblk = crypto_cursor_segment(&cc_out,
				    &outlen);
			} else {
				crypto_cursor_advance(&cc_out, todo);
				outlen -= todo;
				outblk += todo;
			}
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(ctx, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
		    s.tag);
	}

out:
	explicit_bzero(ctx, exf->ctxsize);
	explicit_bzero(&s, sizeof(s));
	return (error);
}

/*
 * Apply a cipher and a digest to perform EtA.
 */
static int
swcr_eta(const struct swcr_session *ses, struct cryptop *crp)
{
	int error;

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		error = swcr_encdec(ses, crp);
		if (error == 0)
			error = swcr_authcompute(ses, crp);
	} else {
		error = swcr_authcompute(ses, crp);
		if (error == 0)
			error = swcr_encdec(ses, crp);
	}
	return (error);
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(const struct swcr_session *ses, struct cryptop *crp)
{
	const struct comp_algo *cxf;
	uint8_t *data, *out;
	int adj;
	uint32_t result;

	cxf = ses->swcr_compdec.sw_cxf;

	/*
	 * The (de)compressor must see the whole payload at once, so
	 * gather it into a contiguous buffer first.
	 */
	data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
	    data);

	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
		result = cxf->compress(data, crp->crp_payload_length, &out);
	else
		result = cxf->decompress(data, crp->crp_payload_length, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return (EINVAL);
	crp->crp_olen = result;

	/* Check the compressed size when doing compression */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
		if (result >= crp->crp_payload_length) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return (0);
		}
	}

	/*
	 * Copy back the (de)compressed data.  m_copyback() extends the
	 * mbuf as necessary.
	 */
	crypto_copyback(crp, crp->crp_payload_start, result, out);
	if (result < crp->crp_payload_length) {
		switch (crp->crp_buf.cb_type) {
		case CRYPTO_BUF_MBUF:
		case CRYPTO_BUF_SINGLE_MBUF:
			adj = result - crp->crp_payload_length;
			m_adj(crp->crp_buf.cb_mbuf, adj);
			break;
		case CRYPTO_BUF_UIO: {
			struct uio *uio = crp->crp_buf.cb_uio;
			int ind;

			adj = crp->crp_payload_length - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
			}
			break;
		case CRYPTO_BUF_VMPAGE:
			adj = crp->crp_payload_length - result;
			crp->crp_buf.cb_vm_page_len -= adj;
			break;
		default:
			break;
		}
	}
	free(out, M_CRYPTO_DATA);
	return (0);
}

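/*
 * Allocate and key a cipher context when the key is known at session
 * creation; sessions created without a key must supply one with each
 * request, which swcr_encdec() schedules on the stack.
 */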
static int
swcr_setup_cipher(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	const struct enc_xform *txf;
	int error;

	swe = &ses->swcr_encdec;
	txf = crypto_cipher(csp);
	if (csp->csp_cipher_key != NULL) {
		if (txf->ctxsize != 0) {
			swe->sw_ctx = malloc(txf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swe->sw_ctx == NULL)
				return (ENOMEM);
		}
		error = txf->setkey(swe->sw_ctx,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;
	return (0);
}

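/*
 * Set up authentication state: validate the requested tag length,
 * allocate the inner (and, for HMAC, outer) hash context, and
 * precompute keyed state when the key is already known.
 */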
static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	const struct auth_hash *axf;

	swa = &ses->swcr_auth;

	axf = crypto_auth_hash(csp);
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL) {
		swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_ictx == NULL)
			return (ENOBUFS);
	}

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		swa->sw_hmac = true;
		if (csp->csp_auth_key != NULL) {
			swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if (swa->sw_octx == NULL)
				return (ENOBUFS);
			hmac_init_ipad(axf, csp->csp_auth_key,
			    csp->csp_auth_klen, swa->sw_ictx);
			hmac_init_opad(axf, csp->csp_auth_key,
			    csp->csp_auth_klen, swa->sw_octx);
		}
		break;
	case CRYPTO_RIPEMD160:
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
	case CRYPTO_NULL_HMAC:
		axf->Init(swa->sw_ictx);
		break;
	case CRYPTO_AES_NIST_GMAC:
	case CRYPTO_AES_CCM_CBC_MAC:
	case CRYPTO_POLY1305:
		if (csp->csp_auth_key != NULL) {
			axf->Init(swa->sw_ictx);
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}
		break;
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		/*
		 * Blake2b and Blake2s support an optional key but do
		 * not require one.
		 */
		if (csp->csp_auth_klen == 0)
			axf->Init(swa->sw_ictx);
		else if (csp->csp_auth_key != NULL)
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		break;
	}

	if (csp->csp_mode == CSP_MODE_DIGEST) {
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
			ses->swcr_process = swcr_gmac;
			break;
		case CRYPTO_AES_CCM_CBC_MAC:
			ses->swcr_process = swcr_ccm_cbc_mac;
			break;
		default:
			ses->swcr_process = swcr_authcompute;
		}
	}

	return (0);
}

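/*
 * AEAD sessions reuse the cipher setup; the tag length defaults to the
 * transform's full MAC size when the caller does not choose one.
 */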
static int
swcr_setup_aead(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	int error;

	error = swcr_setup_cipher(ses, csp);
	if (error)
		return (error);

	swa = &ses->swcr_auth;
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = ses->swcr_encdec.sw_exf->macsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	return (0);
}

static bool
swcr_auth_supported(const struct crypto_session_params *csp)
{
	const struct auth_hash *axf;

	axf = crypto_auth_hash(csp);
	if (axf == NULL)
		return (false);
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		break;
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_GCM_IV_LEN)
			return (false);
		break;
	case CRYPTO_POLY1305:
		if (csp->csp_auth_klen != POLY1305_KEY_LEN)
			return (false);
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		break;
	}
	return (true);
}

static bool
swcr_cipher_supported(const struct crypto_session_params *csp)
{
	const struct enc_xform *txf;

	txf = crypto_cipher(csp);
	if (txf == NULL)
		return (false);
	if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
	    txf->ivsize != csp->csp_ivlen)
		return (false);
	return (true);
}

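/* Session flags this software driver knows how to honor. */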
#define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)

static int
swcr_probesession(device_t dev, const struct crypto_session_params *csp)
{
	if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
		case CRYPTO_XCHACHA20_POLY1305:
			return (EINVAL);
		default:
			if (!swcr_cipher_supported(csp))
				return (EINVAL);
			break;
		}
		break;
	case CSP_MODE_DIGEST:
		if (!swcr_auth_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			switch (csp->csp_cipher_klen * 8) {
			case 128:
			case 192:
			case 256:
				break;
			default:
				return (EINVAL);
			}
			break;
		case CRYPTO_CHACHA20_POLY1305:
		case CRYPTO_XCHACHA20_POLY1305:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		/* AEAD algorithms cannot be used for EtA. */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
		case CRYPTO_XCHACHA20_POLY1305:
			return (EINVAL);
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			return (EINVAL);
		}

		if (!swcr_cipher_supported(csp) ||
		    !swcr_auth_supported(csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_SOFTWARE);
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct swcr_session *ses;
	const struct comp_algo *cxf;
	int error;

	ses = crypto_get_driver_session(cses);

	error = 0;
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			break;
#ifdef INVARIANTS
		default:
			panic("bad compression algo");
#endif
		}
		ses->swcr_compdec.sw_cxf = cxf;
		ses->swcr_process = swcr_compdec;
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_NULL_CBC:
			ses->swcr_process = swcr_null;
			break;
#ifdef INVARIANTS
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
		case CRYPTO_XCHACHA20_POLY1305:
			panic("bad cipher algo");
#endif
		default:
			error = swcr_setup_cipher(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_encdec;
		}
		break;
	case CSP_MODE_DIGEST:
		error = swcr_setup_auth(ses, csp);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			error = swcr_setup_aead(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_gcm;
			break;
		case CRYPTO_AES_CCM_16:
			error = swcr_setup_aead(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_ccm;
			break;
		case CRYPTO_CHACHA20_POLY1305:
		case CRYPTO_XCHACHA20_POLY1305:
			error = swcr_setup_aead(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_chacha20_poly1305;
			break;
#ifdef INVARIANTS
		default:
			panic("bad aead algo");
#endif
		}
		break;
	case CSP_MODE_ETA:
#ifdef INVARIANTS
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
		case CRYPTO_XCHACHA20_POLY1305:
			panic("bad eta cipher algo");
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			panic("bad eta auth algo");
		}
#endif

		error = swcr_setup_auth(ses, csp);
		if (error)
			break;
		if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
			/* Effectively degrade to digest mode. */
			ses->swcr_process = swcr_authcompute;
			break;
		}

		error = swcr_setup_cipher(ses, csp);
		if (error == 0)
			ses->swcr_process = swcr_eta;
		break;
	default:
		error = EINVAL;
	}

	if (error)
		swcr_freesession(dev, cses);
	return (error);
}

static void
swcr_freesession(device_t dev, crypto_session_t cses)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(cses);

	zfree(ses->swcr_encdec.sw_ctx, M_CRYPTO_DATA);
	zfree(ses->swcr_auth.sw_ictx, M_CRYPTO_DATA);
	zfree(ses->swcr_auth.sw_octx, M_CRYPTO_DATA);
}

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(crp->crp_session);

	crp->crp_etype = ses->swcr_process(ses, crp);

	crypto_done(crp);
	return (0);
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	device_quiet(dev);
	return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{

	swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
			CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return (ENXIO);
	}

	return (0);
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	return (0);
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_probesession, swcr_probesession),
	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, crypto_modevent, NULL);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
1755