/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>
#include <sys/mutex.h>

#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

struct swcr_auth {
	void *sw_ictx;
	void *sw_octx;
	struct auth_hash *sw_axf;
	uint16_t sw_mlen;
};

struct swcr_encdec {
	void *sw_kschedule;
	struct enc_xform *sw_exf;
};

struct swcr_compdec {
	struct comp_algo *sw_cxf;
};

struct swcr_session {
	struct mtx swcr_lock;
	int (*swcr_process)(struct swcr_session *, struct cryptop *);

	struct swcr_auth swcr_auth;
	struct swcr_encdec swcr_encdec;
	struct swcr_compdec swcr_compdec;
};

static int32_t swcr_id;

static void swcr_freesession(device_t dev, crypto_session_t cses);

/* Used for CRYPTO_NULL_CBC. */
static int
swcr_null(struct swcr_session *ses, struct cryptop *crp)
{

	return (0);
}

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
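/*
 * Note on chaining: for ciphers without a 'reinit' hook the loop below
 * implements CBC directly, C[i] = E_K(P[i] ^ C[i-1]) when encrypting and
 * P[i] = D_K(C[i]) ^ C[i-1] when decrypting, with C[0] seeded from the IV.
 * 'ivp' always points at the block to XOR with next; when decrypting in
 * place, the previous ciphertext block is staged in 'iv'/'iv2' before it
 * is overwritten.
 */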
static int
swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	const struct crypto_session_params *csp;
	struct swcr_encdec *sw;
	struct enc_xform *exf;
	int i, blks, inlen, ivlen, outlen, resid;
	struct crypto_buffer_cursor cc_in, cc_out;
	const unsigned char *inblk;
	unsigned char *outblk;
	int error;
	bool encrypting;

	error = 0;

	sw = &ses->swcr_encdec;
	exf = sw->sw_exf;
	ivlen = exf->ivsize;

	if (exf->native_blocksize == 0) {
		/* Check for non-padded data */
		if ((crp->crp_payload_length % exf->blocksize) != 0)
			return (EINVAL);

		blks = exf->blocksize;
	} else
		blks = exf->native_blocksize;

	if (exf == &enc_xform_aes_icm &&
	    (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	if (crp->crp_cipher_key != NULL) {
		csp = crypto_get_params(crp->crp_session);
		error = exf->setkey(sw->sw_kschedule,
		    crp->crp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}

	crypto_read_iv(crp, iv);

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(sw->sw_kschedule, iv);
	}

	ivp = iv;

	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	inlen = crypto_cursor_seglen(&cc_in);
	inblk = crypto_cursor_segbase(&cc_in);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	outlen = crypto_cursor_seglen(&cc_out);
	outblk = crypto_cursor_segbase(&cc_out);

	resid = crp->crp_payload_length;
	encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);

	/*
	 * Loop through the full blocks, encrypting or decrypting them.
	 * 'inlen' is the remaining length of the current segment in the
	 * input buffer.  'outlen' is the remaining length of the current
	 * segment in the output buffer.
	 */
	while (resid >= blks) {
		/*
		 * If the current block is not contained within the
		 * current input/output segment, use 'blk' as a local
		 * buffer.
		 */
		if (inlen < blks) {
			crypto_cursor_copydata(&cc_in, blks, blk);
			inblk = blk;
		}
		if (outlen < blks)
			outblk = blk;

		/*
		 * Ciphers without a 'reinit' hook are assumed to be
		 * used in CBC mode where the chaining is done here.
		 */
		if (exf->reinit != NULL) {
			if (encrypting)
				exf->encrypt(sw->sw_kschedule, inblk, outblk);
			else
				exf->decrypt(sw->sw_kschedule, inblk, outblk);
		} else if (encrypting) {
			/* XOR with previous block */
			for (i = 0; i < blks; i++)
				outblk[i] = inblk[i] ^ ivp[i];

			exf->encrypt(sw->sw_kschedule, outblk, outblk);

			/*
			 * Keep encrypted block for XOR'ing
			 * with next block
			 */
			memcpy(iv, outblk, blks);
			ivp = iv;
		} else {	/* decrypt */
			/*
			 * Keep encrypted block for XOR'ing
			 * with next block
			 */
			nivp = (ivp == iv) ? iv2 : iv;
			memcpy(nivp, inblk, blks);

			exf->decrypt(sw->sw_kschedule, inblk, outblk);

			/* XOR with previous block */
			for (i = 0; i < blks; i++)
				outblk[i] ^= ivp[i];

			ivp = nivp;
		}

		if (inlen < blks) {
			inlen = crypto_cursor_seglen(&cc_in);
			inblk = crypto_cursor_segbase(&cc_in);
		} else {
			crypto_cursor_advance(&cc_in, blks);
			inlen -= blks;
			inblk += blks;
		}

		if (outlen < blks) {
			crypto_cursor_copyback(&cc_out, blks, blk);
			outlen = crypto_cursor_seglen(&cc_out);
			outblk = crypto_cursor_segbase(&cc_out);
		} else {
			crypto_cursor_advance(&cc_out, blks);
			outlen -= blks;
			outblk += blks;
		}

		resid -= blks;
	}

	/* Handle trailing partial block for stream ciphers. */
	if (resid > 0) {
		KASSERT(exf->native_blocksize != 0,
		    ("%s: partial block of %d bytes for cipher %s",
		    __func__, resid, exf->name));
		KASSERT(exf->reinit != NULL,
		    ("%s: partial block cipher %s without reinit hook",
		    __func__, exf->name));
		KASSERT(resid < blks, ("%s: partial block too big", __func__));

		inlen = crypto_cursor_seglen(&cc_in);
		outlen = crypto_cursor_seglen(&cc_out);
		if (inlen < resid) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			inblk = blk;
		} else
			inblk = crypto_cursor_segbase(&cc_in);
		if (outlen < resid)
			outblk = blk;
		else
			outblk = crypto_cursor_segbase(&cc_out);
		if (encrypting)
			exf->encrypt_last(sw->sw_kschedule, inblk, outblk,
			    resid);
		else
			exf->decrypt_last(sw->sw_kschedule, inblk, outblk,
			    resid);
		if (outlen < resid)
			crypto_cursor_copyback(&cc_out, resid, blk);
	}

	explicit_bzero(blk, sizeof(blk));
	explicit_bzero(iv, sizeof(iv));
	explicit_bzero(iv2, sizeof(iv2));
	return (0);
}

static void
swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw,
    const uint8_t *key, int klen)
{

	switch (axf->type) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		hmac_init_ipad(axf, key, klen, sw->sw_ictx);
		hmac_init_opad(axf, key, klen, sw->sw_octx);
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		axf->Setkey(sw->sw_ictx, key, klen);
		axf->Init(sw->sw_ictx);
		break;
	default:
		panic("%s: algorithm %d doesn't use keys", __func__, axf->type);
	}
}

/*
 * Compute or verify hash.
 */
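/*
 * For the HMAC algorithms, 'sw_ictx' and 'sw_octx' hold contexts that were
 * pre-loaded with the ipad- and opad-XORed key by swcr_authprepare(), so
 * the digest below computes HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 * without touching the key again.
 */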
static int
swcr_authcompute(struct swcr_session *ses, struct cryptop *crp)
{
	u_char aalg[HASH_MAX_LEN];
	const struct crypto_session_params *csp;
	struct swcr_auth *sw;
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	sw = &ses->swcr_auth;

	axf = sw->sw_axf;

	if (crp->crp_auth_key != NULL) {
		csp = crypto_get_params(crp->crp_session);
		swcr_authprepare(axf, sw, crp->crp_auth_key,
		    csp->csp_auth_klen);
	}

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	if (crp->crp_aad != NULL)
		err = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
	else
		err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
		    axf->Update, &ctx);
	if (err)
		return err;

	if (CRYPTO_HAS_OUTPUT_BUFFER(crp) &&
	    CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		err = crypto_apply_buf(&crp->crp_obuf,
		    crp->crp_payload_output_start, crp->crp_payload_length,
		    axf->Update, &ctx);
	else
		err = crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length, axf->Update, &ctx);
	if (err)
		return err;

	switch (axf->type) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_POLY1305:
		axf->Final(aalg, &ctx);
		break;
	}

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char uaalg[HASH_MAX_LEN];

		crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
		if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
			err = EBADMSG;
		explicit_bzero(uaalg, sizeof(uaalg));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
	}
	explicit_bzero(aalg, sizeof(aalg));
	return (err);
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */

static int
swcr_gmac(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[GMAC_DIGEST_LEN];
	u_char iv[AES_BLOCK_LEN];
	struct crypto_buffer_cursor cc;
	const u_char *inblk;
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	uint32_t *blkp;
	int blksz, error, ivlen, len, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = GMAC_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	crypto_read_iv(crp, iv);

	axf->Reinit(&ctx, iv, ivlen);
	crypto_cursor_init(&cc, &crp->crp_buf);
	crypto_cursor_advance(&cc, crp->crp_payload_start);
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= len) {
		len = crypto_cursor_seglen(&cc);
		if (len >= blksz) {
			inblk = crypto_cursor_segbase(&cc);
			len = rounddown(MIN(len, resid), blksz);
			crypto_cursor_advance(&cc, len);
		} else {
			len = blksz;
			crypto_cursor_copydata(&cc, len, blk);
			inblk = blk;
		}
		axf->Update(&ctx, inblk, len);
	}
	if (resid > 0) {
		memset(blk, 0, blksz);
		crypto_cursor_copydata(&cc, resid, blk);
		axf->Update(&ctx, blk, blksz);
	}

	/* length block */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	error = 0;
	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char tag2[GMAC_DIGEST_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);
		if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
		explicit_bzero(tag2, sizeof(tag2));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}

static int
swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[GMAC_DIGEST_LEN];
	u_char iv[AES_BLOCK_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	uint32_t *blkp;
	int blksz, error, ivlen, len, r, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = GMAC_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	KASSERT(axf->blocksize == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_GCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	if (crp->crp_aad != NULL) {
		len = rounddown(crp->crp_aad_length, blksz);
		if (len != 0)
			axf->Update(&ctx, crp->crp_aad, len);
		if (crp->crp_aad_length != len) {
			memset(blk, 0, blksz);
			memcpy(blk, (char *)crp->crp_aad + len,
			    crp->crp_aad_length - len);
			axf->Update(&ctx, blk, blksz);
		}
	} else {
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_aad_start);
		for (resid = crp->crp_aad_length; resid >= blksz;
		    resid -= len) {
			len = crypto_cursor_seglen(&cc_in);
			if (len >= blksz) {
				inblk = crypto_cursor_segbase(&cc_in);
				len = rounddown(MIN(len, resid), blksz);
				crypto_cursor_advance(&cc_in, len);
			} else {
				len = blksz;
				crypto_cursor_copydata(&cc_in, len, blk);
				inblk = blk;
			}
			axf->Update(&ctx, inblk, len);
		}
		if (resid > 0) {
			memset(blk, 0, blksz);
			crypto_cursor_copydata(&cc_in, resid, blk);
			axf->Update(&ctx, blk, blksz);
		}
	}

	exf->reinit(swe->sw_kschedule, iv);
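
	/*
	 * The pass below runs the payload through AES-CTR and GHASH in one
	 * go when encrypting; when decrypting it only accumulates GHASH over
	 * the ciphertext, and the data is decrypted later, once the tag has
	 * been verified.  GHASH is finished with a length block carrying the
	 * AAD and ciphertext lengths in bits as 64-bit big-endian values.
	 */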
	/* Do encryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		if (crypto_cursor_seglen(&cc_in) < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else {
			inblk = crypto_cursor_segbase(&cc_in);
			crypto_cursor_advance(&cc_in, blksz);
		}
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			exf->encrypt(swe->sw_kschedule, inblk, outblk);
			axf->Update(&ctx, outblk, blksz);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			axf->Update(&ctx, inblk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
		axf->Update(&ctx, blk, resid);
	}

	/* length block */
	memset(blk, 0, blksz);
	blkp = (uint32_t *)blk + 1;
	*blkp = htobe32(crp->crp_aad_length * 8);
	blkp = (uint32_t *)blk + 3;
	*blkp = htobe32(crp->crp_payload_length * 8);
	axf->Update(&ctx, blk, blksz);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		u_char tag2[GMAC_DIGEST_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen, tag2);

		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
		explicit_bzero(tag2, sizeof(tag2));
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		    resid -= blksz) {
			if (crypto_cursor_seglen(&cc_in) < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else {
				inblk = crypto_cursor_segbase(&cc_in);
				crypto_cursor_advance(&cc_in, blksz);
			}
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			exf->decrypt(swe->sw_kschedule, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}

out:
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));

	return (error);
}

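/*
 * AES-CCM combines CTR-mode encryption with a CBC-MAC.  The CBC-MAC is
 * computed over a first block (B_0) that encodes the flags, nonce and
 * payload length, and the AAD length is encoded ahead of the AAD itself,
 * so both lengths must be handed to the transform before any data is
 * MAC'ed.
 */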
static int
swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
{
	u_char tag[AES_CBC_MAC_HASH_LEN];
	u_char iv[AES_BLOCK_LEN];
	union authctx ctx;
	struct swcr_auth *swa;
	struct auth_hash *axf;
	int error, ivlen;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	crypto_read_iv(crp, iv);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = 0;

	axf->Reinit(&ctx, iv, ivlen);
	if (crp->crp_aad != NULL)
		error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
	else
		error = crypto_apply(crp, crp->crp_payload_start,
		    crp->crp_payload_length, axf->Update, &ctx);
	if (error)
		return (error);

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		u_char tag2[AES_CBC_MAC_HASH_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);
		if (timingsafe_bcmp(tag, tag2, swa->sw_mlen) != 0)
			error = EBADMSG;
		explicit_bzero(tag2, sizeof(tag2));
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}

static int
swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
{
	uint32_t blkbuf[howmany(AES_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char tag[AES_CBC_MAC_HASH_LEN];
	u_char iv[AES_BLOCK_LEN];
	struct crypto_buffer_cursor cc_in, cc_out;
	const u_char *inblk;
	u_char *outblk;
	union authctx ctx;
	struct swcr_auth *swa;
	struct swcr_encdec *swe;
	struct auth_hash *axf;
	struct enc_xform *exf;
	int blksz, error, ivlen, r, resid;

	swa = &ses->swcr_auth;
	axf = swa->sw_axf;

	bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
	blksz = AES_BLOCK_LEN;
	KASSERT(axf->blocksize == blksz, ("%s: axf block size mismatch",
	    __func__));

	swe = &ses->swcr_encdec;
	exf = swe->sw_exf;
	KASSERT(axf->blocksize == exf->native_blocksize,
	    ("%s: blocksize mismatch", __func__));

	if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
		return (EINVAL);

	/* Initialize the IV */
	ivlen = AES_CCM_IV_LEN;
	bcopy(crp->crp_iv, iv, ivlen);

	/*
	 * AES CCM-CBC-MAC needs to know the length of both the auth
	 * data and payload data before doing the auth computation.
	 */
	ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
	ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;

	/* Supply MAC with IV */
	axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	if (crp->crp_aad != NULL)
		error = axf->Update(&ctx, crp->crp_aad, crp->crp_aad_length);
	else
		error = crypto_apply(crp, crp->crp_aad_start,
		    crp->crp_aad_length, axf->Update, &ctx);
	if (error)
		return (error);

	exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption/decryption with MAC */
	crypto_cursor_init(&cc_in, &crp->crp_buf);
	crypto_cursor_advance(&cc_in, crp->crp_payload_start);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		crypto_cursor_init(&cc_out, &crp->crp_obuf);
		crypto_cursor_advance(&cc_out, crp->crp_payload_output_start);
	} else
		cc_out = cc_in;
	for (resid = crp->crp_payload_length; resid >= blksz; resid -= blksz) {
		if (crypto_cursor_seglen(&cc_in) < blksz) {
			crypto_cursor_copydata(&cc_in, blksz, blk);
			inblk = blk;
		} else {
			inblk = crypto_cursor_segbase(&cc_in);
			crypto_cursor_advance(&cc_in, blksz);
		}
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			axf->Update(&ctx, inblk, blksz);
			exf->encrypt(swe->sw_kschedule, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		} else {
			/*
			 * One of the problems with CCM+CBC is that
			 * the authentication is done on the
			 * unencrypted data.  As a result, we have to
			 * decrypt the data twice: once to generate
			 * the tag and a second time after the tag is
			 * verified.
			 */
			exf->decrypt(swe->sw_kschedule, inblk, blk);
			axf->Update(&ctx, blk, blksz);
		}
	}
	if (resid > 0) {
		crypto_cursor_copydata(&cc_in, resid, blk);
		if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
			axf->Update(&ctx, blk, resid);
			exf->encrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		} else {
			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
			axf->Update(&ctx, blk, resid);
		}
	}

	/* Finalize MAC */
	axf->Final(tag, &ctx);

	/* Validate tag */
	error = 0;
	if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		u_char tag2[AES_CBC_MAC_HASH_LEN];

		crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
		    tag2);

		r = timingsafe_bcmp(tag, tag2, swa->sw_mlen);
		explicit_bzero(tag2, sizeof(tag2));
		if (r != 0) {
			error = EBADMSG;
			goto out;
		}

		/* tag matches, decrypt data */
		exf->reinit(swe->sw_kschedule, iv);
		crypto_cursor_init(&cc_in, &crp->crp_buf);
		crypto_cursor_advance(&cc_in, crp->crp_payload_start);
		for (resid = crp->crp_payload_length; resid > blksz;
		    resid -= blksz) {
			if (crypto_cursor_seglen(&cc_in) < blksz) {
				crypto_cursor_copydata(&cc_in, blksz, blk);
				inblk = blk;
			} else {
				inblk = crypto_cursor_segbase(&cc_in);
				crypto_cursor_advance(&cc_in, blksz);
			}
			if (crypto_cursor_seglen(&cc_out) < blksz)
				outblk = blk;
			else
				outblk = crypto_cursor_segbase(&cc_out);
			exf->decrypt(swe->sw_kschedule, inblk, outblk);
			if (outblk == blk)
				crypto_cursor_copyback(&cc_out, blksz, blk);
			else
				crypto_cursor_advance(&cc_out, blksz);
		}
		if (resid > 0) {
			crypto_cursor_copydata(&cc_in, resid, blk);
			exf->decrypt_last(swe->sw_kschedule, blk, blk, resid);
			crypto_cursor_copyback(&cc_out, resid, blk);
		}
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, tag);
	}

out:
	explicit_bzero(blkbuf, sizeof(blkbuf));
	explicit_bzero(tag, sizeof(tag));
	explicit_bzero(iv, sizeof(iv));
	return (error);
}

/*
 * Apply a cipher and a digest to perform EtA.
 */
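/*
 * Encrypt-then-authenticate: when encrypting, the payload is encrypted
 * first and the digest is computed over the resulting ciphertext; when
 * decrypting, the digest is verified before anything is decrypted.
 */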
static int
swcr_eta(struct swcr_session *ses, struct cryptop *crp)
{
	int error;

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
		error = swcr_encdec(ses, crp);
		if (error == 0)
			error = swcr_authcompute(ses, crp);
	} else {
		error = swcr_authcompute(ses, crp);
		if (error == 0)
			error = swcr_encdec(ses, crp);
	}
	return (error);
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct swcr_session *ses, struct cryptop *crp)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = ses->swcr_compdec.sw_cxf;

	/*
	 * We must handle the whole buffer of data at once; if the data is
	 * not contiguous in the mbuf, copy it into a local buffer first.
	 */
	data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
	    data);

	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
		result = cxf->compress(data, crp->crp_payload_length, &out);
	else
		result = cxf->decompress(data, crp->crp_payload_length, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return (EINVAL);
	crp->crp_olen = result;

	/* Check the compressed size when doing compression */
	if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
		if (result >= crp->crp_payload_length) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return (0);
		}
	}

	/*
	 * Copy back the (de)compressed data.  m_copyback extends the
	 * mbuf as necessary.
	 */
	crypto_copyback(crp, crp->crp_payload_start, result, out);
	if (result < crp->crp_payload_length) {
		switch (crp->crp_buf.cb_type) {
		case CRYPTO_BUF_MBUF:
			adj = result - crp->crp_payload_length;
			m_adj(crp->crp_buf.cb_mbuf, adj);
			break;
		case CRYPTO_BUF_UIO: {
			struct uio *uio = crp->crp_buf.cb_uio;
			int ind;

			adj = crp->crp_payload_length - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
			break;
		default:
			break;
		}
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}

static int
swcr_setup_cipher(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_encdec *swe;
	struct enc_xform *txf;
	int error;

	swe = &ses->swcr_encdec;
	txf = crypto_cipher(csp);
	MPASS(txf->ivsize == csp->csp_ivlen);
	if (txf->ctxsize != 0) {
		swe->sw_kschedule = malloc(txf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swe->sw_kschedule == NULL)
			return (ENOMEM);
	}
	if (csp->csp_cipher_key != NULL) {
		error = txf->setkey(swe->sw_kschedule,
		    csp->csp_cipher_key, csp->csp_cipher_klen);
		if (error)
			return (error);
	}
	swe->sw_exf = txf;
	return (0);
}

static int
swcr_setup_auth(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	swa = &ses->swcr_auth;

	axf = crypto_auth_hash(csp);
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		swa->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
		    M_NOWAIT);
		if (swa->sw_octx == NULL)
			return (ENOBUFS);

		if (csp->csp_auth_key != NULL) {
			swcr_authprepare(axf, swa, csp->csp_auth_key,
			    csp->csp_auth_klen);
		}

		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_NIST_GMAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_gmac;
		break;
	case CRYPTO_POLY1305:
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		/*
		 * Blake2b and Blake2s support an optional key but do
		 * not require one.
		 */
		if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL)
			axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
			    csp->csp_auth_klen);
		axf->Init(swa->sw_ictx);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_authcompute;
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		axf->Init(swa->sw_ictx);
		axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
		    csp->csp_auth_klen);
		if (csp->csp_mode == CSP_MODE_DIGEST)
			ses->swcr_process = swcr_ccm_cbc_mac;
		break;
	}

	return (0);
}

static int
swcr_setup_gcm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	if (csp->csp_ivlen != AES_GCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_nist_gmac_aes_128;
		break;
	case 192:
		axf = &auth_hash_nist_gmac_aes_192;
		break;
	case 256:
		axf = &auth_hash_nist_gmac_aes_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	return (swcr_setup_cipher(ses, csp));
}

static int
swcr_setup_ccm(struct swcr_session *ses,
    const struct crypto_session_params *csp)
{
	struct swcr_auth *swa;
	struct auth_hash *axf;

	if (csp->csp_ivlen != AES_CCM_IV_LEN)
		return (EINVAL);

	/* First, setup the auth side. */
	swa = &ses->swcr_auth;
	switch (csp->csp_cipher_klen * 8) {
	case 128:
		axf = &auth_hash_ccm_cbc_mac_128;
		break;
	case 192:
		axf = &auth_hash_ccm_cbc_mac_192;
		break;
	case 256:
		axf = &auth_hash_ccm_cbc_mac_256;
		break;
	default:
		return (EINVAL);
	}
	swa->sw_axf = axf;
	if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
		return (EINVAL);
	if (csp->csp_auth_mlen == 0)
		swa->sw_mlen = axf->hashsize;
	else
		swa->sw_mlen = csp->csp_auth_mlen;
	swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
	if (swa->sw_ictx == NULL)
		return (ENOBUFS);
	axf->Init(swa->sw_ictx);
	if (csp->csp_cipher_key != NULL)
		axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
		    csp->csp_cipher_klen);

	/* Second, setup the cipher side. */
	return (swcr_setup_cipher(ses, csp));
}

static bool
swcr_auth_supported(const struct crypto_session_params *csp)
{
	struct auth_hash *axf;

	axf = crypto_auth_hash(csp);
	if (axf == NULL)
		return (false);
	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		break;
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_GCM_IV_LEN)
			return (false);
		break;
	case CRYPTO_POLY1305:
		if (csp->csp_auth_klen != POLY1305_KEY_LEN)
			return (false);
		break;
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen * 8) {
		case 128:
		case 192:
		case 256:
			break;
		default:
			return (false);
		}
		if (csp->csp_auth_key == NULL)
			return (false);
		if (csp->csp_ivlen != AES_CCM_IV_LEN)
			return (false);
		break;
	}
	return (true);
}

static bool
swcr_cipher_supported(const struct crypto_session_params *csp)
{
	struct enc_xform *txf;

	txf = crypto_cipher(csp);
	if (txf == NULL)
		return (false);
	if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
	    txf->ivsize != csp->csp_ivlen)
		return (false);
	return (true);
}

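/*
 * Session probing: this driver answers with CRYPTODEV_PROBE_SOFTWARE, a
 * lower priority than hardware drivers use, so a matching hardware driver
 * is preferred when one is available.
 */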
static int
swcr_probesession(device_t dev, const struct crypto_session_params *csp)
{

	if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) !=
	    0)
		return (EINVAL);
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		default:
			if (!swcr_cipher_supported(csp))
				return (EINVAL);
			break;
		}
		break;
	case CSP_MODE_DIGEST:
		if (!swcr_auth_supported(csp))
			return (EINVAL);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		/* AEAD algorithms cannot be used for EtA. */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			return (EINVAL);
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			return (EINVAL);
		}

		if (!swcr_cipher_supported(csp) ||
		    !swcr_auth_supported(csp))
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_SOFTWARE);
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct swcr_session *ses;
	struct swcr_encdec *swe;
	struct swcr_auth *swa;
	struct comp_algo *cxf;
	int error;

	ses = crypto_get_driver_session(cses);
	mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);

	error = 0;
	swe = &ses->swcr_encdec;
	swa = &ses->swcr_auth;
	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			break;
#ifdef INVARIANTS
		default:
			panic("bad compression algo");
#endif
		}
		ses->swcr_compdec.sw_cxf = cxf;
		ses->swcr_process = swcr_compdec;
		break;
	case CSP_MODE_CIPHER:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_NULL_CBC:
			ses->swcr_process = swcr_null;
			break;
#ifdef INVARIANTS
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad cipher algo");
#endif
		default:
			error = swcr_setup_cipher(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_encdec;
		}
		break;
	case CSP_MODE_DIGEST:
		error = swcr_setup_auth(ses, csp);
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
			error = swcr_setup_gcm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_gcm;
			break;
		case CRYPTO_AES_CCM_16:
			error = swcr_setup_ccm(ses, csp);
			if (error == 0)
				ses->swcr_process = swcr_ccm;
			break;
#ifdef INVARIANTS
		default:
			panic("bad aead algo");
#endif
		}
		break;
	case CSP_MODE_ETA:
#ifdef INVARIANTS
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			panic("bad eta cipher algo");
		}
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_CCM_CBC_MAC:
			panic("bad eta auth algo");
		}
#endif

		error = swcr_setup_auth(ses, csp);
		if (error)
			break;
		if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
			/* Effectively degrade to digest mode. */
			ses->swcr_process = swcr_authcompute;
			break;
		}

		error = swcr_setup_cipher(ses, csp);
		if (error == 0)
			ses->swcr_process = swcr_eta;
		break;
	default:
		error = EINVAL;
	}

	if (error)
		swcr_freesession(dev, cses);
	return (error);
}

static void
swcr_freesession(device_t dev, crypto_session_t cses)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(cses);

	mtx_destroy(&ses->swcr_lock);

	zfree(ses->swcr_encdec.sw_kschedule, M_CRYPTO_DATA);
	zfree(ses->swcr_auth.sw_ictx, M_CRYPTO_DATA);
	zfree(ses->swcr_auth.sw_octx, M_CRYPTO_DATA);
}

/*
 * Process a software request.
 */
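/*
 * Requests on a session are serialized on the per-session mutex and are
 * completed synchronously in the caller's context before crypto_done() is
 * called; the driver advertises this with CRYPTOCAP_F_SYNC when it
 * attaches.
 */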
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct swcr_session *ses;

	ses = crypto_get_driver_session(crp->crp_session);
	mtx_lock(&ses->swcr_lock);

	crp->crp_etype = ses->swcr_process(ses, crp);

	mtx_unlock(&ses->swcr_lock);
	crypto_done(crp);
	return (0);
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{

	swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
	    CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!");
		return (ENXIO);
	}

	return (0);
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	return 0;
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify, swcr_identify),
	DEVMETHOD(device_probe, swcr_probe),
	DEVMETHOD(device_attach, swcr_attach),
	DEVMETHOD(device_detach, swcr_detach),

	DEVMETHOD(cryptodev_probesession, swcr_probesession),
	DEVMETHOD(cryptodev_newsession, swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process, swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);