/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>
#include <sys/mutex.h>

#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

struct swcr_auth {
	void		*sw_ictx;
	void		*sw_octx;
	struct auth_hash *sw_axf;
	uint16_t	sw_mlen;
};

struct swcr_encdec {
	void		*sw_kschedule;
	struct enc_xform *sw_exf;
};

struct swcr_compdec {
	struct comp_algo *sw_cxf;
};

struct swcr_session {
	struct mtx	swcr_lock;
	int		(*swcr_process)(struct swcr_session *, struct cryptop *);

	struct swcr_auth swcr_auth;
	struct swcr_encdec swcr_encdec;
	struct swcr_compdec swcr_compdec;
};

static	int32_t swcr_id;

static	void swcr_freesession(device_t dev, crypto_session_t cses);

/* Used for CRYPTO_NULL_CBC. */
static int
swcr_null(struct swcr_session *ses, struct cryptop *crp)
{

	return (0);
}

/*
 * Apply a symmetric encryption/decryption algorithm.
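 *
 * Ciphers that provide a 'reinit' hook (such as AES-ICM) handle their
 * own IV and chaining; ciphers without one are driven in CBC mode by
 * the block loop below.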
 */
static int
swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	const struct crypto_session_params *csp;
	struct swcr_encdec *sw;
	struct enc_xform *exf;
	int i, blks, inlen, ivlen, outlen, resid;
	struct crypto_buffer_cursor cc_in, cc_out;
	const unsigned char *inblk;
	unsigned char *outblk;
	int error;
	bool encrypting;

	error = 0;

	sw = &ses->swcr_encdec;
	exf = sw->sw_exf;
	ivlen = exf->ivsize;

	if (exf->native_blocksize == 0) {
		/* Check for non-padded data */
		if ((crp->crp_payload_length % exf->blocksize) != 0)
			return (EINVAL);

		blks = exf->blocksize;
	} else
		blks = exf->native_blocksize;

	if (exf == &enc_xform_aes_icm &&
	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	if (crp->crp_cipher_key != NULL) {
		csp = crypto_get_params(crp->crp_session);
		error = exf->setkey(sw->sw_kschedule,
		    crp->crp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}

	crypto_read_iv(crp, iv);

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(sw->sw_kschedule, iv);
	}

	ivp = iv;

	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	inlen = crypto_cursor_seglen(&cc_in);
	inblk = crypto_cursor_segbase(&cc_in);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	outlen = crypto_cursor_seglen(&cc_out);
	outblk = crypto_cursor_segbase(&cc_out);

	resid = crp->crp_payload_length;
	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);

	/*
	 * Loop through encrypting blocks.  'inlen' is the remaining
	 * length of the current segment in the input buffer.
	 * 'outlen' is the remaining length of the current segment in
	 * the output buffer.
	 */
	while (resid >= blks) {
		/*
		 * If the current block is not contained within the
		 * current input/output segment, use 'blk' as a local
		 * buffer.
		 */
		if (inlen < blks) {
			crypto_cursor_copydata(&cc_in, blks, blk);
			inblk = blk;
		}
		if (outlen < blks)
			outblk = blk;

		/*
		 * Ciphers without a 'reinit' hook are assumed to be
		 * used in CBC mode where the chaining is done here.
		 */
		if (exf->reinit != NULL) {
			if (encrypting)
				exf->encrypt(sw->sw_kschedule, inblk, outblk);
			else
				exf->decrypt(sw->sw_kschedule, inblk, outblk);
		} else if (encrypting) {
			/* XOR with previous block */
			for (i = 0; i < blks; i++)
				outblk[i] = inblk[i] ^ ivp[i];

			exf->encrypt(sw->sw_kschedule, outblk, outblk);

			/*
			 * Keep encrypted block for XOR'ing
			 * with next block
			 */
			memcpy(iv, outblk, blks);
			ivp = iv;
		} else {	/* decrypt */
			/*
			 * Keep encrypted block for XOR'ing
			 * with next block
			 */
			nivp = (ivp == iv) ? iv2 : iv;
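			/*
			 * The ciphertext block must be saved before it
			 * is (possibly in place) decrypted below; 'iv'
			 * and 'iv2' are alternated so the previous
			 * block is not clobbered while it is still
			 * needed for chaining.
			 */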
			memcpy(nivp, inblk, blks);

			exf->decrypt(sw->sw_kschedule, inblk, outblk);

			/* XOR with previous block */
			for (i = 0; i < blks; i++)
				outblk[i] ^= ivp[i];

			ivp = nivp;
		}

		if (inlen < blks) {
			inlen = crypto_cursor_seglen(&cc_in);
			inblk = crypto_cursor_segbase(&cc_in);
		} else {
			crypto_cursor_advance(&cc_in, blks);
			inlen -= blks;
			inblk += blks;
		}

		if (outlen < blks) {
			crypto_cursor_copyback(&cc_out, blks, blk);
			outlen = crypto_cursor_seglen(&cc_out);
			outblk = crypto_cursor_segbase(&cc_out);
		} else {
			crypto_cursor_advance(&cc_out, blks);
			outlen -= blks;
			outblk += blks;
		}

		resid -= blks;
	}

	/* Handle trailing partial block for stream ciphers. */
	if (resid > 0) {
		KASSERT(exf->native_blocksize != 0,
		    ("%s: partial block of %d bytes for cipher %s",
		    __func__, resid, exf->name));
		KASSERT(exf->reinit != NULL,
		    ("%s: partial block cipher %s without reinit hook",
		    __func__, exf->name));
		KASSERT(resid < blks, ("%s: partial block too big", __func__));

		inlen = crypto_cursor_seglen(&cc_in);
		outlen = crypto_cursor_seglen(&cc_out);
		if (inlen < resid) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			inblk = blk;
		} else
			inblk = crypto_cursor_segbase(&cc_in);
		if (outlen < resid)
			outblk = blk;
		else
			outblk = crypto_cursor_segbase(&cc_out);
		if (encrypting)
			exf->encrypt_last(sw->sw_kschedule, inblk, outblk,
			    resid);
		else
			exf->decrypt_last(sw->sw_kschedule, inblk, outblk,
			    resid);
		if (outlen < resid)
			crypto_cursor_copyback(&cc_out, resid, blk);
	}

	explicit_bzero(blk, sizeof(blk));
	explicit_bzero(iv, sizeof(iv));
	explicit_bzero(iv2, sizeof(iv2));
	return (0);
}

static void
swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw,
    const uint8_t *key, int klen)
{

	switch (axf->type) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		hmac_init_ipad(axf, key, klen, sw->sw_ictx);
		hmac_init_opad(axf, key, klen, sw->sw_octx);
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		axf->Setkey(sw->sw_ictx, key, klen);
		axf->Init(sw->sw_ictx);
		break;
	default:
		panic("%s: algorithm %d doesn't use keys", __func__, axf->type);
	}
}

/*
 * Compute or verify hash.
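 *
 * A per-request key, if supplied, is folded into the session's
 * ictx/octx state by swcr_authprepare().  For verification requests
 * the computed digest is compared against the digest in the request
 * using timingsafe_bcmp(); otherwise the digest is written back into
 * the request buffer.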
 */
static int
swcr_authcompute(struct swcr_session *ses, struct cryptop *crp)
{
	u_char aalg[HASH_MAX_LEN];
	const struct crypto_session_params *csp;
	struct swcr_auth *sw;
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	sw = &ses->swcr_auth;

	axf = sw->sw_axf;

	if (crp->crp_auth_key != NULL) {
		csp = crypto_get_params(crp->crp_session);
		swcr_authprepare(axf, sw, crp->crp_auth_key,
		    csp->csp_auth_klen);
	}

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
	    axf->Update, &ctx);
	if (err)
		return err;

	if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
	    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		err = crypto_apply_buf(&crp->crp_obuf,
		    crp->crp_payload_output_start, crp->crp_payload_length,
		    axf->Update, &ctx);
	else
		err = crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length, axf->Update, &ctx);
	if (err)
		return err;

	switch (axf->type) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_POLY1305:
		axf->Final(aalg, &ctx);
		break;
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char uaalg[HASH_MAX_LEN];

		crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
		if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
			err = EBADMSG;
		explicit_bzero(uaalg, sizeof(uaalg));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
	}
	explicit_bzero(aalg, sizeof(aalg));
	return (err);
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */

static int
swcr_gmac(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[GMAC_DIGEST_LEN];
	u_char iv[AES_BLOCK_LEN];
	struct crypto_buffer_cursor cc;
	const u_char *inblk;
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	uint32_t *blkp;
	int blksz, error, ivlen, len, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = GMAC_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	crypto_read_iv(crp, iv);

	axf->Reinit(&ctx, iv, ivlen);
	crypto_cursor_init(&cc, &crp->crp_buf);
	crypto_cursor_advance(&cc, crp->crp_payload_start);
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= len) {
		len = crypto_cursor_seglen(&cc);
		if (len >= blksz) {
			inblk = crypto_cursor_segbase(&cc);
			len = rounddown(MIN(len, resid), blksz);
			crypto_cursor_advance(&cc, len);
		} else {
			len = blksz;
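			/*
			 * The block straddles a segment boundary;
			 * gather a full block into the local buffer.
			 */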
			crypto_cursor_copydata(&cc, len, blk);
			inblk = blk;
		}
		axf->Update(&ctx, inblk, len);
	}
	if (resid > 0) {
		memset(blk, 0, blksz);
		crypto_cursor_copydata(&cc, resid, blk);
		axf->Update(&ctx, blk, blksz);
	}

	/* length block */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	error = 0;
	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char tag2[GMAC_DIGEST_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);
		if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
		explicit_bzero(tag2, sizeof(tag2));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}

static int
swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[GMAC_DIGEST_LEN];
	u_char iv[AES_BLOCK_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	uint32_t *blkp;
	int blksz, error, ivlen, len, r, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = GMAC_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	KASSERT(axf->blocksize == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_aad_start);
	for (resid = crp->crp_aad_length; resid >= blksz; resid -= len) {
		len = crypto_cursor_seglen(&cc_in);
		if (len >= blksz) {
			inblk = crypto_cursor_segbase(&cc_in);
			len = rounddown(MIN(len, resid), blksz);
			crypto_cursor_advance(&cc_in, len);
		} else {
			len = blksz;
			crypto_cursor_copydata(&cc_in, len, blk);
			inblk = blk;
		}
		axf->Update(&ctx, inblk, len);
	}
	if (resid > 0) {
		memset(blk, 0, blksz);
		crypto_cursor_copydata(&cc_in, resid, blk);
		axf->Update(&ctx, blk, blksz);
	}

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		if (crypto_cursor_seglen(&cc_in) < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else {
			inblk = crypto_cursor_segbase(&cc_in);
			crypto_cursor_advance(&cc_in, blksz);
		}
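		/*
		 * For encryption, GHASH is computed over the generated
		 * ciphertext.  For decryption only the MAC is updated
		 * here; the payload is decrypted further below, once
		 * the tag has been verified.
		 */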
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			exf->encrypt(swe->sw_kschedule, inblk, outblk);
			axf->Update(&ctx, outblk, blksz);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			axf->Update(&ctx, inblk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
		axf->Update(&ctx, blk, resid);
	}

	/* length block */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_aad_length * 8);
	blkp = (uint32_t *)blk + 3;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		u_char tag2[GMAC_DIGEST_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2);

		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
		explicit_bzero(tag2, sizeof(tag2));
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		    resid -= blksz) {
			if (crypto_cursor_seglen(&cc_in) < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else {
				inblk = crypto_cursor_segbase(&cc_in);
				crypto_cursor_advance(&cc_in, blksz);
			}
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			exf->decrypt(swe->sw_kschedule, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}

out:
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));

	return (error);
}

static int
swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
{
	u_char tag[AES_CBC_MAC_HASH_LEN];
	u_char iv[AES_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	int error, ivlen;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	crypto_read_iv(crp, iv);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
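	 * In digest-only mode there is no encrypted payload, so the
	 * data to authenticate is accounted as auth data and the crypt
	 * data length is left at zero.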
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = 0;

	axf->Reinit(&ctx, iv, ivlen);
	error = crypto_apply(crp, crp->crp_payload_start,
	    crp->crp_payload_length, axf->Update, &ctx);
	if (error)
		return (error);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char tag2[AES_CBC_MAC_HASH_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);
		if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
		explicit_bzero(tag2, sizeof(tag2));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}

static int
swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[AES_CBC_MAC_HASH_LEN];
	u_char iv[AES_BLOCK_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	int blksz, error, ivlen, r, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = AES_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	KASSERT(axf->blocksize == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
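	 * (CCM encodes these lengths into its initial CBC-MAC blocks,
	 * so they cannot be supplied incrementally.)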
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	error = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
	    axf->Update, &ctx);
	if (error)
		return (error);

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption/decryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		if (crypto_cursor_seglen(&cc_in) < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else {
			inblk = crypto_cursor_segbase(&cc_in);
			crypto_cursor_advance(&cc_in, blksz);
		}
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			axf->Update(&ctx, inblk, blksz);
			exf->encrypt(swe->sw_kschedule, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			/*
			 * One of the problems with CCM+CBC is that
			 * the authentication is done on the
			 * unencrypted data.  As a result, we have to
			 * decrypt the data twice: once to generate
			 * the tag and a second time after the tag is
			 * verified.
			 */
			exf->decrypt(swe->sw_kschedule, inblk, blk);
			axf->Update(&ctx, blk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			axf->Update(&ctx, blk, resid);
			exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		} else {
			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
			axf->Update(&ctx, blk, resid);
		}
	}

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		u_char tag2[AES_CBC_MAC_HASH_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);

		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
		explicit_bzero(tag2, sizeof(tag2));
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		exf->reinit(swe->sw_kschedule, iv);
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		    resid -= blksz) {
			if (crypto_cursor_seglen(&cc_in) < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else {
				inblk = crypto_cursor_segbase(&cc_in);
				crypto_cursor_advance(&cc_in, blksz);
			}
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			exf->decrypt(swe->sw_kschedule, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}

out:
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}

/*
 * Apply a cipher and a digest to perform EtA.
 */
static int
swcr_eta(struct swcr_session *ses, struct cryptop *crp)
{
	int error;

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		error = swcr_encdec(ses, crp);
		if (error == 0)
			error = swcr_authcompute(ses, crp);
	} else {
		error = swcr_authcompute(ses, crp);
		if (error == 0)
			error = swcr_encdec(ses, crp);
	}
	return (error);
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct swcr_session *ses, struct cryptop *crp)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = ses->swcr_compdec.sw_cxf;

	/*
	 * We must handle the whole buffer of data at once, so if the
	 * data is not contiguous in the request buffer we copy it into
	 * a temporary buffer first.
	 */
	data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
	    data);

	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
		result = cxf->compress(data, crp->crp_payload_length, &out);
	else
		result = cxf->decompress(data, crp->crp_payload_length, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return (EINVAL);
	crp->crp_olen = result;

	/* Check the compressed size when doing compression */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
		if (result >= crp->crp_payload_length) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return (0);
		}
	}

	/*
	 * Copy back the (de)compressed data.  m_copyback() will extend
	 * the mbuf as necessary.
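	 * If the result is shorter than the original payload, the
	 * buffer is trimmed afterwards: m_adj() for mbuf chains, or by
	 * shrinking the trailing iovecs for uio buffers.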
	 */
	crypto_copyback(crp, crp->crp_payload_start, result, out);
	if (result < crp->crp_payload_length) {
		switch (crp->crp_buf.cb_type) {
		case CRYPTO_BUF_MBUF:
			adj = result - crp->crp_payload_length;
			m_adj(crp->crp_buf.cb_mbuf, adj);
			break;
		case CRYPTO_BUF_UIO: {
			struct uio *uio = crp->crp_buf.cb_uio;
			int ind;

			adj = crp->crp_payload_length - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
			break;
		default:
			break;
		}
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}

static int
swcr_setup_cipher(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	struct enc_xform *txf;
	int error;

	swe = &ses->swcr_encdec;
	txf = crypto_cipher(csp);
	MPASS(txf->ivsize == csp->csp_ivlen);
	if (txf->ctxsize != 0) {
		swe->sw_kschedule = malloc(txf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swe->sw_kschedule == NULL)
			return (ENOMEM);
	}
	if (csp->csp_cipher_key != NULL) {
		error = txf->setkey(swe->sw_kschedule,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;
	return (0);
}

static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	swa = &ses->swcr_auth;

	axf = crypto_auth_hash(csp);
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_octx == NULL)
			return (ENOBUFS);

		if (csp->csp_auth_key != NULL) {
			swcr_authprepare(axf, swa, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}

		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_NIST_GMAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_gmac;
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		/*
		 * Blake2b and Blake2s support an optional key but do
		 * not require one.
		 */
		if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL)
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_ccm_cbc_mac;
		break;
	}

	return (0);
}

static int
swcr_setup_gcm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	if (csp->csp_ivlen != AES_GCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_nist_gmac_aes_128;
		break;
	case 192:
		axf = &auth_hash_nist_gmac_aes_192;
		break;
	case 256:
		axf = &auth_hash_nist_gmac_aes_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	return (swcr_setup_cipher(ses, csp));
}

static int
swcr_setup_ccm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	if (csp->csp_ivlen != AES_CCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_ccm_cbc_mac_128;
		break;
	case 192:
		axf = &auth_hash_ccm_cbc_mac_192;
		break;
	case 256:
		axf = &auth_hash_ccm_cbc_mac_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/*
	 * Second, setup the cipher side.
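	 * swcr_setup_cipher() allocates the key schedule and programs
	 * the key if one was supplied with the session parameters.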
	 */
	return (swcr_setup_cipher(ses, csp));
}

static bool
swcr_auth_supported(const struct crypto_session_params *csp)
{
	struct auth_hash *axf;

	axf = crypto_auth_hash(csp);
	if (axf == NULL)
		return (false);
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		break;
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_GCM_IV_LEN)
			return (false);
		break;
	case CRYPTO_POLY1305:
		if (csp->csp_auth_klen != POLY1305_KEY_LEN)
			return (false);
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_CCM_IV_LEN)
			return (false);
		break;
	}
	return (true);
}

static bool
swcr_cipher_supported(const struct crypto_session_params *csp)
{
	struct enc_xform *txf;

	txf = crypto_cipher(csp);
	if (txf == NULL)
		return (false);
	if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
	    txf->ivsize != csp->csp_ivlen)
		return (false);
	return (true);
}

static int
swcr_probesession(device_t dev, const struct crypto_session_params *csp)
{

	if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT)) != 0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		default:
			if (!swcr_cipher_supported(csp))
				return (EINVAL);
			break;
		}
		break;
	case CSP_MODE_DIGEST:
		if (!swcr_auth_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		/* AEAD algorithms cannot be used for EtA. */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			return (EINVAL);
		}

		if (!swcr_cipher_supported(csp) ||
		    !swcr_auth_supported(csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_SOFTWARE);
}

/*
 * Generate a new software session.
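 *
 * The per-session swcr_process callback is selected here based on the
 * session mode and algorithms, so swcr_process() only has to dispatch
 * through a single function pointer at request time.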
 */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct swcr_session *ses;
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct comp_algo *cxf;
	int error;

	ses = crypto_get_driver_session(cses);
	mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);

	error = 0;
	swe = &ses->swcr_encdec;
	swa = &ses->swcr_auth;
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			break;
#ifdef INVARIANTS
		default:
			panic("bad compression algo");
#endif
		}
		ses->swcr_compdec.sw_cxf = cxf;
		ses->swcr_process = swcr_compdec;
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_NULL_CBC:
			ses->swcr_process = swcr_null;
			break;
#ifdef INVARIANTS
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad cipher algo");
#endif
		default:
			error = swcr_setup_cipher(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_encdec;
		}
		break;
	case CSP_MODE_DIGEST:
		error = swcr_setup_auth(ses, csp);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			error = swcr_setup_gcm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_gcm;
			break;
		case CRYPTO_AES_CCM_16:
			error = swcr_setup_ccm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_ccm;
			break;
#ifdef INVARIANTS
		default:
			panic("bad aead algo");
#endif
		}
		break;
	case CSP_MODE_ETA:
#ifdef INVARIANTS
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad eta cipher algo");
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			panic("bad eta auth algo");
		}
#endif

		error = swcr_setup_auth(ses, csp);
		if (error)
			break;
		if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
			/* Effectively degrade to digest mode. */
			ses->swcr_process = swcr_authcompute;
			break;
		}

		error = swcr_setup_cipher(ses, csp);
		if (error == 0)
			ses->swcr_process = swcr_eta;
		break;
	default:
		error = EINVAL;
	}

	if (error)
		swcr_freesession(dev, cses);
	return (error);
}

static void
swcr_freesession(device_t dev, crypto_session_t cses)
{
	struct swcr_session *ses;
	struct swcr_auth *swa;
	struct auth_hash *axf;

	ses = crypto_get_driver_session(cses);

	mtx_destroy(&ses->swcr_lock);

	zfree(ses->swcr_encdec.sw_kschedule, M_CRYPTO_DATA);

	axf = ses->swcr_auth.sw_axf;
	if (axf != NULL) {
		swa = &ses->swcr_auth;
		if (swa->sw_ictx != NULL) {
			explicit_bzero(swa->sw_ictx, axf->ctxsize);
			free(swa->sw_ictx, M_CRYPTO_DATA);
		}
		if (swa->sw_octx != NULL) {
			explicit_bzero(swa->sw_octx, axf->ctxsize);
			free(swa->sw_octx, M_CRYPTO_DATA);
		}
	}
}

/*
 * Process a software request.
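 *
 * Requests for a given session are serialized by swcr_lock and are
 * completed synchronously via crypto_done() before returning.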
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(crp->crp_session);
	mtx_lock(&ses->swcr_lock);

	crp->crp_etype = ses->swcr_process(ses, crp);

	mtx_unlock(&ses->swcr_lock);
	crypto_done(crp);
	return (0);
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{

	swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
	    CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return (ENXIO);
	}

	return (0);
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	return 0;
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_probesession, swcr_probesession),
	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);