/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>
#include <sys/mutex.h>

#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

struct swcr_auth {
	void		*sw_ictx;
	void		*sw_octx;
	struct auth_hash *sw_axf;
	uint16_t	sw_mlen;
};

struct swcr_encdec {
	void		*sw_kschedule;
	struct enc_xform *sw_exf;
};

struct swcr_compdec {
	struct comp_algo *sw_cxf;
};

struct swcr_session {
	struct mtx	swcr_lock;
	int		(*swcr_process)(struct swcr_session *, struct cryptop *);

	struct swcr_auth swcr_auth;
	struct swcr_encdec swcr_encdec;
	struct swcr_compdec swcr_compdec;
};

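/*
 * Descriptive note: requests are dispatched through the swcr_process
 * function pointer, which swcr_newsession() points at the handler that
 * matches the session mode (cipher, digest, AEAD, EtA or compression);
 * the driver's process method invokes it with swcr_lock held.
 */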

static int32_t swcr_id;

static void swcr_freesession(device_t dev, crypto_session_t cses);

/* Used for CRYPTO_NULL_CBC. */
static int
swcr_null(struct swcr_session *ses, struct cryptop *crp)
{

	return (0);
}

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	const struct crypto_session_params *csp;
	struct swcr_encdec *sw;
	struct enc_xform *exf;
	int i, blks, inlen, ivlen, outlen, resid;
	struct crypto_buffer_cursor cc_in, cc_out;
	const unsigned char *inblk;
	unsigned char *outblk;
	int error;
	bool encrypting;

	error = 0;

	sw = &ses->swcr_encdec;
	exf = sw->sw_exf;
	ivlen = exf->ivsize;

	if (exf->native_blocksize == 0) {
		/* Check for non-padded data */
		if ((crp->crp_payload_length % exf->blocksize) != 0)
			return (EINVAL);

		blks = exf->blocksize;
	} else
		blks = exf->native_blocksize;

	if (exf == &enc_xform_aes_icm &&
	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	if (crp->crp_cipher_key != NULL) {
		csp = crypto_get_params(crp->crp_session);
		error = exf->setkey(sw->sw_kschedule,
		    crp->crp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}

	crypto_read_iv(crp, iv);

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(sw->sw_kschedule, iv);
	}

	ivp = iv;

	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	inlen = crypto_cursor_seglen(&cc_in);
	inblk = crypto_cursor_segbase(&cc_in);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	outlen = crypto_cursor_seglen(&cc_out);
	outblk = crypto_cursor_segbase(&cc_out);

	resid = crp->crp_payload_length;
	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);

	/*
	 * Loop through encrypting blocks.  'inlen' is the remaining
	 * length of the current segment in the input buffer.
	 * 'outlen' is the remaining length of current segment in the
	 * output buffer.
	 */
	while (resid >= blks) {
		/*
		 * If the current block is not contained within the
		 * current input/output segment, use 'blk' as a local
		 * buffer.
		 */
		if (inlen < blks) {
			crypto_cursor_copydata(&cc_in, blks, blk);
			inblk = blk;
		}
		if (outlen < blks)
			outblk = blk;

		/*
		 * Ciphers without a 'reinit' hook are assumed to be
		 * used in CBC mode where the chaining is done here.
		 */
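		/*
		 * Standard CBC chaining, as implemented below:
		 *   encrypt: C_i = E_K(P_i ^ C_{i-1})
		 *   decrypt: P_i = D_K(C_i) ^ C_{i-1}
		 * with C_0 = IV; 'ivp' tracks C_{i-1} across iterations.
		 */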
		if (exf->reinit != NULL) {
			if (encrypting)
				exf->encrypt(sw->sw_kschedule, inblk, outblk);
			else
				exf->decrypt(sw->sw_kschedule, inblk, outblk);
		} else if (encrypting) {
			/* XOR with previous block */
			for (i = 0; i < blks; i++)
				outblk[i] = inblk[i] ^ ivp[i];

			exf->encrypt(sw->sw_kschedule, outblk, outblk);

			/*
			 * Keep encrypted block for XOR'ing
			 * with next block
			 */
			memcpy(iv, outblk, blks);
			ivp = iv;
		} else {	/* decrypt */
			/*
			 * Keep encrypted block for XOR'ing
			 * with next block
			 */
			nivp = (ivp == iv) ? iv2 : iv;
			memcpy(nivp, inblk, blks);

			exf->decrypt(sw->sw_kschedule, inblk, outblk);

			/* XOR with previous block */
			for (i = 0; i < blks; i++)
				outblk[i] ^= ivp[i];

			ivp = nivp;
		}

		if (inlen < blks) {
			inlen = crypto_cursor_seglen(&cc_in);
			inblk = crypto_cursor_segbase(&cc_in);
		} else {
			crypto_cursor_advance(&cc_in, blks);
			inlen -= blks;
			inblk += blks;
		}

		if (outlen < blks) {
			crypto_cursor_copyback(&cc_out, blks, blk);
			outlen = crypto_cursor_seglen(&cc_out);
			outblk = crypto_cursor_segbase(&cc_out);
		} else {
			crypto_cursor_advance(&cc_out, blks);
			outlen -= blks;
			outblk += blks;
		}

		resid -= blks;
	}

	/* Handle trailing partial block for stream ciphers. */
	if (resid > 0) {
		KASSERT(exf->native_blocksize != 0,
		    ("%s: partial block of %d bytes for cipher %s",
		    __func__, resid, exf->name));
		KASSERT(exf->reinit != NULL,
		    ("%s: partial block cipher %s without reinit hook",
		    __func__, exf->name));
		KASSERT(resid < blks, ("%s: partial block too big", __func__));

		inlen = crypto_cursor_seglen(&cc_in);
		outlen = crypto_cursor_seglen(&cc_out);
		if (inlen < resid) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			inblk = blk;
		} else
			inblk = crypto_cursor_segbase(&cc_in);
		if (outlen < resid)
			outblk = blk;
		else
			outblk = crypto_cursor_segbase(&cc_out);
		if (encrypting)
			exf->encrypt_last(sw->sw_kschedule, inblk, outblk,
			    resid);
		else
			exf->decrypt_last(sw->sw_kschedule, inblk, outblk,
			    resid);
		if (outlen < resid)
			crypto_cursor_copyback(&cc_out, resid, blk);
	}

	explicit_bzero(blk, sizeof(blk));
	explicit_bzero(iv, sizeof(iv));
	explicit_bzero(iv2, sizeof(iv2));
	return (0);
}

static void
swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw,
    const uint8_t *key, int klen)
{

	switch (axf->type) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		hmac_init_ipad(axf, key, klen, sw->sw_ictx);
		hmac_init_opad(axf, key, klen, sw->sw_octx);
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		axf->Setkey(sw->sw_ictx, key, klen);
		axf->Init(sw->sw_ictx);
		break;
	default:
		panic("%s: algorithm %d doesn't use keys", __func__, axf->type);
	}
}

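/*
 * For the HMAC transforms above, sw_ictx and sw_octx hold hash states
 * precomputed from (key ^ ipad) and (key ^ opad), so each request only
 * resumes those states:
 *   HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 */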

/*
 * Compute or verify hash.
 */
static int
swcr_authcompute(struct swcr_session *ses, struct cryptop *crp)
{
	u_char aalg[HASH_MAX_LEN];
	const struct crypto_session_params *csp;
	struct swcr_auth *sw;
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	sw = &ses->swcr_auth;

	axf = sw->sw_axf;

	csp = crypto_get_params(crp->crp_session);
	if (crp->crp_auth_key != NULL) {
		swcr_authprepare(axf, sw, crp->crp_auth_key,
		    csp->csp_auth_klen);
	}

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	if (crp->crp_aad != NULL)
		err = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
	else
		err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
		    axf->Update, &ctx);
	if (err)
		goto out;

	if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
	    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		err = crypto_apply_buf(&crp->crp_obuf,
		    crp->crp_payload_output_start, crp->crp_payload_length,
		    axf->Update, &ctx);
	else
		err = crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length, axf->Update, &ctx);
	if (err)
		goto out;

	if (csp->csp_flags & CSP_F_ESN)
		axf->Update(&ctx, crp->crp_esn, 4);

	axf->Final(aalg, &ctx);
	if (sw->sw_octx != NULL) {
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char uaalg[HASH_MAX_LEN];

		crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
		if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
			err = EBADMSG;
		explicit_bzero(uaalg, sizeof(uaalg));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
	}
	explicit_bzero(aalg, sizeof(aalg));
out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (err);
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */

static int
swcr_gmac(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[GMAC_DIGEST_LEN];
	u_char iv[AES_BLOCK_LEN];
	struct crypto_buffer_cursor cc;
	const u_char *inblk;
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	uint32_t *blkp;
	int blksz, error, ivlen, len, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = GMAC_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	crypto_read_iv(crp, iv);

	axf->Reinit(&ctx, iv, ivlen);
	crypto_cursor_init(&cc, &crp->crp_buf);
	crypto_cursor_advance(&cc, crp->crp_payload_start);
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= len) {
		len = crypto_cursor_seglen(&cc);
		if (len >= blksz) {
			inblk = crypto_cursor_segbase(&cc);
			len = rounddown(MIN(len, resid), blksz);
			crypto_cursor_advance(&cc, len);
		} else {
			len = blksz;
			crypto_cursor_copydata(&cc, len, blk);
			inblk = blk;
		}
		axf->Update(&ctx, inblk, len);
	}
	if (resid > 0) {
		memset(blk, 0, blksz);
		crypto_cursor_copydata(&cc, resid, blk);
		axf->Update(&ctx, blk, blksz);
	}

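	/*
	 * GHASH finishes with a block holding the bit lengths of the AAD
	 * and ciphertext as two 64-bit big-endian values.  Only the low
	 * 32 bits of each are written since lengths are bounded by
	 * INT_MAX (see the CTASSERTs above).  For GMAC everything is
	 * authenticated-only, so just the AAD length field is set.
	 */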
	/* length block */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	error = 0;
	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char tag2[GMAC_DIGEST_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);
		if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
		explicit_bzero(tag2, sizeof(tag2));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}

static int
swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[GMAC_DIGEST_LEN];
	u_char iv[AES_BLOCK_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	uint32_t *blkp;
	int blksz, error, ivlen, len, r, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = GMAC_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	KASSERT(axf->blocksize == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	if (crp->crp_aad != NULL) {
		len = rounddown(crp->crp_aad_length, blksz);
		if (len != 0)
			axf->Update(&ctx, crp->crp_aad, len);
		if (crp->crp_aad_length != len) {
			memset(blk, 0, blksz);
			memcpy(blk, (char *)crp->crp_aad + len,
			    crp->crp_aad_length - len);
			axf->Update(&ctx, blk, blksz);
		}
	} else {
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_aad_start);
		for (resid = crp->crp_aad_length; resid >= blksz;
		    resid -= len) {
			len = crypto_cursor_seglen(&cc_in);
			if (len >= blksz) {
				inblk = crypto_cursor_segbase(&cc_in);
				len = rounddown(MIN(len, resid), blksz);
				crypto_cursor_advance(&cc_in, len);
			} else {
				len = blksz;
				crypto_cursor_copydata(&cc_in, len, blk);
				inblk = blk;
			}
			axf->Update(&ctx, inblk, len);
		}
		if (resid > 0) {
			memset(blk, 0, blksz);
			crypto_cursor_copydata(&cc_in, resid, blk);
			axf->Update(&ctx, blk, blksz);
		}
	}

	if (crp->crp_cipher_key != NULL)
		exf->setkey(swe->sw_kschedule, crp->crp_cipher_key,
		    crypto_get_params(crp->crp_session)->csp_cipher_klen);
	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		if (crypto_cursor_seglen(&cc_in) < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else {
			inblk = crypto_cursor_segbase(&cc_in);
			crypto_cursor_advance(&cc_in, blksz);
		}
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			exf->encrypt(swe->sw_kschedule, inblk, outblk);
			axf->Update(&ctx, outblk, blksz);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			axf->Update(&ctx, inblk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
		axf->Update(&ctx, blk, resid);
	}

	/* length block */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_aad_length * 8);
	blkp = (uint32_t *)blk + 3;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		u_char tag2[GMAC_DIGEST_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2);

		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
		explicit_bzero(tag2, sizeof(tag2));
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		    resid -= blksz) {
			if (crypto_cursor_seglen(&cc_in) < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else {
				inblk = crypto_cursor_segbase(&cc_in);
				crypto_cursor_advance(&cc_in, blksz);
			}
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			exf->decrypt(swe->sw_kschedule, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}

out:
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));

	return (error);
}

static int
swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
{
	u_char tag[AES_CBC_MAC_HASH_LEN];
	u_char iv[AES_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	int error, ivlen;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	crypto_read_iv(crp, iv);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = 0;

	axf->Reinit(&ctx, iv, ivlen);
	if (crp->crp_aad != NULL)
		error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
	else
		error = crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length, axf->Update, &ctx);
	if (error)
		return (error);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char tag2[AES_CBC_MAC_HASH_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);
		if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
		explicit_bzero(tag2, sizeof(tag2));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}

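/*
 * CCM's CBC-MAC starts from a B_0 block that encodes the nonce and the
 * total AAD and message lengths (NIST SP 800-38C / RFC 3610), which is
 * why both CCM handlers fill in authDataLength/cryptDataLength before
 * calling Reinit instead of supplying the lengths at the end.
 */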

static int
swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[AES_CBC_MAC_HASH_LEN];
	u_char iv[AES_BLOCK_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	int blksz, error, ivlen, r, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = AES_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	KASSERT(axf->blocksize == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	if (crp->crp_aad != NULL)
		error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
	else
		error = crypto_apply(crp, crp->crp_aad_start,
		    crp->crp_aad_length, axf->Update, &ctx);
	if (error)
		return (error);

	if (crp->crp_cipher_key != NULL)
		exf->setkey(swe->sw_kschedule, crp->crp_cipher_key,
		    crypto_get_params(crp->crp_session)->csp_cipher_klen);
	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption/decryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		if (crypto_cursor_seglen(&cc_in) < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else {
			inblk = crypto_cursor_segbase(&cc_in);
			crypto_cursor_advance(&cc_in, blksz);
		}
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			axf->Update(&ctx, inblk, blksz);
			exf->encrypt(swe->sw_kschedule, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			/*
			 * One of the problems with CCM+CBC is that
			 * the authentication is done on the
			 * unencrypted data.  As a result, we have to
			 * decrypt the data twice: once to generate
			 * the tag and a second time after the tag is
			 * verified.
			 */
			exf->decrypt(swe->sw_kschedule, inblk, blk);
			axf->Update(&ctx, blk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			axf->Update(&ctx, blk, resid);
			exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		} else {
			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
			axf->Update(&ctx, blk, resid);
		}
	}

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		u_char tag2[AES_CBC_MAC_HASH_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);

		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
		explicit_bzero(tag2, sizeof(tag2));
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		exf->reinit(swe->sw_kschedule, iv);
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		    resid -= blksz) {
			if (crypto_cursor_seglen(&cc_in) < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else {
				inblk = crypto_cursor_segbase(&cc_in);
				crypto_cursor_advance(&cc_in, blksz);
			}
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			exf->decrypt(swe->sw_kschedule, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}

out:
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}

static int
swcr_chacha20_poly1305(struct swcr_session *ses, struct cryptop *crp)
{
	const struct crypto_session_params *csp;
	uint64_t blkbuf[howmany(CHACHA20_NATIVE_BLOCK_LEN, sizeof(uint64_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[POLY1305_HASH_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	uint64_t *blkp;
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	int blksz, error, r, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	blksz = exf->native_blocksize;
	KASSERT(blksz <= sizeof(blkbuf), ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	csp = crypto_get_params(crp->crp_session);

	/* Generate Poly1305 key. */
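	/*
	 * The chacha20-poly1305 auth transform derives the one-time
	 * Poly1305 key from the cipher key and the per-request nonce
	 * (the first 32 bytes of the ChaCha20 block-0 keystream, per
	 * RFC 8439) when Reinit is called below.
	 */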
	if (crp->crp_cipher_key != NULL)
		axf->Setkey(&ctx, crp->crp_cipher_key, csp->csp_cipher_klen);
	else
		axf->Setkey(&ctx, csp->csp_cipher_key, csp->csp_cipher_klen);
	axf->Reinit(&ctx, crp->crp_iv, csp->csp_ivlen);

	/* Supply MAC with AAD */
	if (crp->crp_aad != NULL)
		axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
	else
		crypto_apply(crp, crp->crp_aad_start,
		    crp->crp_aad_length, axf->Update, &ctx);
	if (crp->crp_aad_length % 16 != 0) {
		/* padding1 */
		memset(blk, 0, 16);
		axf->Update(&ctx, blk, 16 - crp->crp_aad_length % 16);
	}

	if (crp->crp_cipher_key != NULL)
		exf->setkey(swe->sw_kschedule, crp->crp_cipher_key,
		    csp->csp_cipher_klen);
	exf->reinit(swe->sw_kschedule, crp->crp_iv);

	/* Do encryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		if (crypto_cursor_seglen(&cc_in) < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else {
			inblk = crypto_cursor_segbase(&cc_in);
			crypto_cursor_advance(&cc_in, blksz);
		}
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			exf->encrypt(swe->sw_kschedule, inblk, outblk);
			axf->Update(&ctx, outblk, blksz);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			axf->Update(&ctx, inblk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
		axf->Update(&ctx, blk, resid);
		if (resid % 16 != 0) {
			/* padding2 */
			memset(blk, 0, 16);
			axf->Update(&ctx, blk, 16 - resid % 16);
		}
	}

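	/*
	 * Per RFC 8439 the MAC input ends with the AAD length and the
	 * ciphertext length as 64-bit little-endian byte counts; the
	 * zero padding to 16-byte boundaries was applied above.
	 */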
	/* lengths */
	blkp = (uint64_t *)blk;
	blkp[0] = htole64(crp->crp_aad_length);
	blkp[1] = htole64(crp->crp_payload_length);
	axf->Update(&ctx, blk, sizeof(uint64_t) * 2);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		u_char tag2[POLY1305_HASH_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2);

		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
		explicit_bzero(tag2, sizeof(tag2));
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		    resid -= blksz) {
			if (crypto_cursor_seglen(&cc_in) < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else {
				inblk = crypto_cursor_segbase(&cc_in);
				crypto_cursor_advance(&cc_in, blksz);
			}
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			exf->decrypt(swe->sw_kschedule, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}

out:
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(&ctx, sizeof(ctx));
	return (error);
}

/*
 * Apply a cipher and a digest to perform EtA.
 */
static int
swcr_eta(struct swcr_session *ses, struct cryptop *crp)
{
	int error;

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		error = swcr_encdec(ses, crp);
		if (error == 0)
			error = swcr_authcompute(ses, crp);
	} else {
		error = swcr_authcompute(ses, crp);
		if (error == 0)
			error = swcr_encdec(ses, crp);
	}
	return (error);
}

/*
 * Apply a compression/decompression algorithm.
 */
static int
swcr_compdec(struct swcr_session *ses, struct cryptop *crp)
{
	uint8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	uint32_t result;

	cxf = ses->swcr_compdec.sw_cxf;

	/*
	 * We must handle the whole buffer of data in one go, so if the
	 * data is not contiguous in the mbuf it is copied into a local
	 * buffer first.
	 */
	data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
	    data);

	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
		result = cxf->compress(data, crp->crp_payload_length, &out);
	else
		result = cxf->decompress(data, crp->crp_payload_length, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return (EINVAL);
	crp->crp_olen = result;

	/* Check the compressed size when doing compression */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
		if (result >= crp->crp_payload_length) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return (0);
		}
	}

	/*
	 * Copy back the (de)compressed data.  m_copyback will extend
	 * the mbuf as necessary.
	 */
	crypto_copyback(crp, crp->crp_payload_start, result, out);
	if (result < crp->crp_payload_length) {
		switch (crp->crp_buf.cb_type) {
		case CRYPTO_BUF_MBUF:
			adj = result - crp->crp_payload_length;
			m_adj(crp->crp_buf.cb_mbuf, adj);
			break;
		case CRYPTO_BUF_UIO: {
			struct uio *uio = crp->crp_buf.cb_uio;
			int ind;

			adj = crp->crp_payload_length - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
			break;
		}
		case CRYPTO_BUF_VMPAGE:
			adj = crp->crp_payload_length - result;
			crp->crp_buf.cb_vm_page_len -= adj;
			break;
		default:
			break;
		}
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}

static int
swcr_setup_cipher(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	struct enc_xform *txf;
	int error;

	swe = &ses->swcr_encdec;
	txf = crypto_cipher(csp);
	MPASS(txf->ivsize == csp->csp_ivlen);
	if (txf->ctxsize != 0) {
		swe->sw_kschedule = malloc(txf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swe->sw_kschedule == NULL)
			return (ENOMEM);
	}
	if (csp->csp_cipher_key != NULL) {
		error = txf->setkey(swe->sw_kschedule,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;
	return (0);
}

static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	swa = &ses->swcr_auth;

	axf = crypto_auth_hash(csp);
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_octx == NULL)
			return (ENOBUFS);

		if (csp->csp_auth_key != NULL) {
			swcr_authprepare(axf, swa, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}

		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_NIST_GMAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_gmac;
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		/*
		 * Blake2b and Blake2s support an optional key but do
		 * not require one.
		 */
		if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL)
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_ccm_cbc_mac;
		break;
	}

	return (0);
}

static int
swcr_setup_gcm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	if (csp->csp_ivlen != AES_GCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_nist_gmac_aes_128;
		break;
	case 192:
		axf = &auth_hash_nist_gmac_aes_192;
		break;
	case 256:
		axf = &auth_hash_nist_gmac_aes_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	return (swcr_setup_cipher(ses, csp));
}

static int
swcr_setup_ccm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	if (csp->csp_ivlen != AES_CCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_ccm_cbc_mac_128;
		break;
	case 192:
		axf = &auth_hash_ccm_cbc_mac_192;
		break;
	case 256:
		axf = &auth_hash_ccm_cbc_mac_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	return (swcr_setup_cipher(ses, csp));
}

static int
swcr_setup_chacha20_poly1305(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	if (csp->csp_ivlen != CHACHA20_POLY1305_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	axf = &auth_hash_chacha20_poly1305;
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;

	/* The auth state is regenerated for each nonce. */

	/* Second, setup the cipher side. */
	return (swcr_setup_cipher(ses, csp));
}

static bool
swcr_auth_supported(const struct crypto_session_params *csp)
{
	struct auth_hash *axf;

	axf = crypto_auth_hash(csp);
	if (axf == NULL)
		return (false);
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		break;
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_GCM_IV_LEN)
			return (false);
		break;
	case CRYPTO_POLY1305:
		if (csp->csp_auth_klen != POLY1305_KEY_LEN)
			return (false);
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_CCM_IV_LEN)
			return (false);
		break;
	}
	return (true);
}

static bool
swcr_cipher_supported(const struct crypto_session_params *csp)
{
	struct enc_xform *txf;

	txf = crypto_cipher(csp);
	if (txf == NULL)
		return (false);
	if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
	    txf->ivsize != csp->csp_ivlen)
		return (false);
	return (true);
}

#define	SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)

static int
swcr_probesession(device_t dev, const struct crypto_session_params *csp)
{
	if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			return (EINVAL);
		default:
			if (!swcr_cipher_supported(csp))
				return (EINVAL);
			break;
		}
		break;
	case CSP_MODE_DIGEST:
		if (!swcr_auth_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		/* AEAD algorithms cannot be used for EtA. */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			return (EINVAL);
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			return (EINVAL);
		}

		if (!swcr_cipher_supported(csp) ||
		    !swcr_auth_supported(csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_SOFTWARE);
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct swcr_session *ses;
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct comp_algo *cxf;
	int error;

	ses = crypto_get_driver_session(cses);
	mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);

	error = 0;
	swe = &ses->swcr_encdec;
	swa = &ses->swcr_auth;
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			break;
#ifdef INVARIANTS
		default:
			panic("bad compression algo");
#endif
		}
		ses->swcr_compdec.sw_cxf = cxf;
		ses->swcr_process = swcr_compdec;
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_NULL_CBC:
			ses->swcr_process = swcr_null;
			break;
#ifdef INVARIANTS
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			panic("bad cipher algo");
#endif
		default:
			error = swcr_setup_cipher(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_encdec;
		}
		break;
	case CSP_MODE_DIGEST:
		error = swcr_setup_auth(ses, csp);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			error = swcr_setup_gcm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_gcm;
			break;
		case CRYPTO_AES_CCM_16:
			error = swcr_setup_ccm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_ccm;
			break;
		case CRYPTO_CHACHA20_POLY1305:
			error = swcr_setup_chacha20_poly1305(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_chacha20_poly1305;
			break;
#ifdef INVARIANTS
		default:
			panic("bad aead algo");
#endif
		}
		break;
	case CSP_MODE_ETA:
#ifdef INVARIANTS
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
		case CRYPTO_CHACHA20_POLY1305:
			panic("bad eta cipher algo");
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			panic("bad eta auth algo");
		}
#endif

		error = swcr_setup_auth(ses, csp);
		if (error)
			break;
		if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
			/* Effectively degrade to digest mode. */
			ses->swcr_process = swcr_authcompute;
			break;
		}

		error = swcr_setup_cipher(ses, csp);
		if (error == 0)
			ses->swcr_process = swcr_eta;
		break;
	default:
		error = EINVAL;
	}

	if (error)
		swcr_freesession(dev, cses);
	return (error);
}

static void
swcr_freesession(device_t dev, crypto_session_t cses)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(cses);

	mtx_destroy(&ses->swcr_lock);

	zfree(ses->swcr_encdec.sw_kschedule, M_CRYPTO_DATA);
	zfree(ses->swcr_auth.sw_ictx, M_CRYPTO_DATA);
	zfree(ses->swcr_auth.sw_octx, M_CRYPTO_DATA);
}

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(crp->crp_session);
	mtx_lock(&ses->swcr_lock);

	crp->crp_etype = ses->swcr_process(ses, crp);

	mtx_unlock(&ses->swcr_lock);
	crypto_done(crp);
	return (0);
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	device_quiet(dev);
	return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{

	swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
	    CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!");
		return (ENXIO);
	}

	return (0);
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	return 0;
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_probesession, swcr_probesession),
	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);

/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);