/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>
#include <sys/md5.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

static int32_t swcr_id;
static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum;
/* Protects swcr_sessions pointer, not data. */
static struct rwlock swcr_sessions_lock;

u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];

static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_authenc(struct cryptop *crp);
static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_freesession(device_t dev, u_int64_t tid);
static int swcr_freesession_locked(device_t dev, u_int64_t tid);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, j, k, blks, ind, count, ivlen;
	struct uio *uio, uiolcl;
	struct iovec iovlcl[4];
	struct iovec *iov;
	int iovcnt, iovalloc;
	int error;

	error = 0;

	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* The data must be an exact multiple of the block size. */
	if (crd->crd_len % blks)
		return EINVAL;

	if (crd->crd_alg == CRYPTO_AES_ICM &&
	    (crd->crd_flags & CRD_F_IV_EXPLICIT) == 0)
		return (EINVAL);

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			arc4rand(iv, ivlen, 0);

		/* Do we need to write the IV? */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv);
		}
	}

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		int error;

		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));

		error = exf->setkey(&sw->sw_kschedule,
		    crd->crd_key, crd->crd_klen / 8);
		if (error)
			return (error);
	}

	iov = iovlcl;
	iovcnt = nitems(iovlcl);
	iovalloc = 0;
	uio = &uiolcl;
	if ((flags & CRYPTO_F_IMBUF) != 0) {
		error = crypto_mbuftoiov((struct mbuf *)buf, &iov, &iovcnt,
		    &iovalloc);
		if (error)
			return (error);
		uio->uio_iov = iov;
		uio->uio_iovcnt = iovcnt;
	} else if ((flags & CRYPTO_F_IOV) != 0)
		uio = (struct uio *)buf;
	else {
		iov[0].iov_base = buf;
		iov[0].iov_len = crd->crd_skip + crd->crd_len;
		uio->uio_iov = iov;
		uio->uio_iovcnt = 1;
	}

	ivp = iv;

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(sw->sw_kschedule, iv);
	}

	count = crd->crd_skip;
	ind = cuio_getptr(uio, count, &k);
	if (ind == -1) {
		error = EINVAL;
		goto out;
	}

	i = crd->crd_len;

	while (i > 0) {
		/*
		 * If there's insufficient data at the end of
		 * an iovec, we have to do some copying.
		 */
		if (uio->uio_iov[ind].iov_len < k + blks &&
		    uio->uio_iov[ind].iov_len != k) {
			cuio_copydata(uio, count, blks, blk);

			/* Actual encryption/decryption */
			if (exf->reinit) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(sw->sw_kschedule,
					    blk);
				} else {
					exf->decrypt(sw->sw_kschedule,
					    blk);
				}
			} else if (crd->crd_flags & CRD_F_ENCRYPT) {
				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, blk);

				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				bcopy(blk, iv, blks);
				ivp = iv;
			} else {	/* decrypt */
				/*
				 * Keep the encrypted block for XOR'ing
				 * with the next block.  Decryption is done
				 * in place, so alternate between iv and
				 * iv2; that way one saved copy always
				 * survives the current round.
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(blk, nivp, blks);

				exf->decrypt(sw->sw_kschedule, blk);

				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				ivp = nivp;
			}

			/* Copy back decrypted block */
			cuio_copyback(uio, count, blks, blk);

			count += blks;

			/* Advance pointer */
			ind = cuio_getptr(uio, count, &k);
			if (ind == -1) {
				error = EINVAL;
				goto out;
			}

			i -= blks;

			/* Could be done... */
			if (i == 0)
				break;
		}

		/*
		 * Warning: idat may point to garbage here, but
		 * we only use it in the while() loop below, and
		 * only if there is indeed enough data.
		 */
		idat = (char *)uio->uio_iov[ind].iov_base + k;

		while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) {
			if (exf->reinit) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(sw->sw_kschedule,
					    idat);
				} else {
					exf->decrypt(sw->sw_kschedule,
					    idat);
				}
			} else if (crd->crd_flags & CRD_F_ENCRYPT) {
				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, idat);
				ivp = idat;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block to be used
				 * in next block's processing.
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(idat, nivp, blks);

				exf->decrypt(sw->sw_kschedule, idat);

				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				ivp = nivp;
			}

			idat += blks;
			count += blks;
			k += blks;
			i -= blks;
		}

		/*
		 * Advance to the next iov if the end of the current iov
		 * is aligned with the end of a cipher block.
		 * Note that the code is equivalent to calling:
		 *	ind = cuio_getptr(uio, count, &k);
		 */
		if (i > 0 && k == uio->uio_iov[ind].iov_len) {
			k = 0;
			ind++;
			if (ind >= uio->uio_iovcnt) {
				error = EINVAL;
				goto out;
			}
		}
	}

out:
	if (iovalloc)
		free(iov, M_CRYPTO_DATA);

	return (error);
}
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
    int klen)
{
	int k;

	klen /= 8;

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/*
		 * We need a buffer that can hold an MD5 or a SHA1 result,
		 * just to throw it away.
		 * What we do here is the initial part of:
		 *   ALGO( key, keyfill, .. )
		 * adding the key to sw_ictx and abusing Final() to get the
		 * "keyfill" padding.
		 * In addition we abuse sw_octx to save the key, so that
		 * swcr_authcompute() can append it at the end.
		 */
		u_char buf[SHA1_RESULTLEN];

		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	default:
		printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
		    "doesn't use keys.\n", __func__, axf->type);
	}
}

/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == NULL)
		return EINVAL;

	axf = sw->sw_axf;

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		/* If we have no key saved, return error. */
		if (sw->sw_octx == NULL)
			return EINVAL;

		/*
		 * Add the trailing copy of the key (see comment in
		 * swcr_authprepare()) after the data:
		 *   ALGO( .., key, algofill )
		 * and let Final() do the proper, natural "algofill"
		 * padding.
		 */
		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */

/*
 * Apply a combined encryption-authentication transformation
 */
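/*
 * The flow is encrypt-then-MAC: the AAD blocks are hashed first, then
 * each payload block is encrypted and fed to the MAC.  On decryption the
 * ciphertext is only hashed on the first pass; it is deciphered and
 * copied back in a second pass, and only after the tag has been checked
 * with timingsafe_bcmp(), so a forged message returns EBADMSG before any
 * plaintext is written out.
 */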
static int
swcr_authenc(struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe = NULL;
	struct auth_hash *axf = NULL;
	struct enc_xform *exf = NULL;
	caddr_t buf = (caddr_t)crp->crp_buf;
	uint32_t *blkp;
	int aadlen, blksz, i, ivlen, len, iskip, oskip, r;

	ivlen = blksz = iskip = oskip = 0;

	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_NIST_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = 12;
			break;
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == NULL)
				return (EINVAL);
			bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
			blksz = axf->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	if (crde == NULL || crda == NULL)
		return (EINVAL);

	if (crde->crd_alg == CRYPTO_AES_NIST_GCM_16 &&
	    (crde->crd_flags & CRD_F_IV_EXPLICIT) == 0)
		return (EINVAL);

	if (crde->crd_klen != crda->crd_klen)
		return (EINVAL);

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else
			arc4rand(iv, ivlen, 0);

		/* Do we need to write the IV? */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(crp->crp_flags, buf, crde->crd_inject,
			    ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(crp->crp_flags, buf, crde->crd_inject,
			    ivlen, iv);
		}
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	aadlen = crda->crd_len;

	for (i = iskip; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz - oskip);
		crypto_copydata(crp->crp_flags, buf, crda->crd_skip + i, len,
		    blk + oskip);
		bzero(blk + len + oskip, blksz - len - oskip);
		axf->Update(&ctx, blk, blksz);
		oskip = 0; /* reset initial output offset */
	}

	if (exf->reinit)
		exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crde->crd_len; i += blksz) {
		len = MIN(crde->crd_len - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crp->crp_flags, buf, crde->crd_skip + i, len,
		    blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
			crypto_copyback(crp->crp_flags, buf,
			    crde->crd_skip + i, len, blk);
		} else {
			axf->Update(&ctx, blk, len);
		}
	}

	/* Do any required special finalization */
	switch (crda->crd_alg) {
	case CRYPTO_AES_128_NIST_GMAC:
	case CRYPTO_AES_192_NIST_GMAC:
	case CRYPTO_AES_256_NIST_GMAC:
		/*
		 * GHASH length block (NIST SP 800-38D): 64 bits of AAD
		 * length followed by 64 bits of ciphertext length, both
		 * counted in bits, big-endian; only the low 32 bits of
		 * each field are filled in here.
		 */
		bzero(blk, blksz);
		blkp = (uint32_t *)blk + 1;
		*blkp = htobe32(aadlen * 8);
		blkp = (uint32_t *)blk + 3;
		*blkp = htobe32(crde->crd_len * 8);
		axf->Update(&ctx, blk, blksz);
		break;
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
		crypto_copydata(crp->crp_flags, buf, crda->crd_inject,
		    axf->hashsize, uaalg);

		r = timingsafe_bcmp(aalg, uaalg, axf->hashsize);
		if (r == 0) {
			/* tag matches, decrypt data */
			for (i = 0; i < crde->crd_len; i += blksz) {
				len = MIN(crde->crd_len - i, blksz);
				if (len < blksz)
					bzero(blk, blksz);
				crypto_copydata(crp->crp_flags, buf,
				    crde->crd_skip + i, len, blk);
				if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
					exf->decrypt(swe->sw_kschedule, blk);
				}
				crypto_copyback(crp->crp_flags, buf,
				    crde->crd_skip + i, len, blk);
			}
		} else
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp->crp_flags, buf, crda->crd_inject,
		    axf->hashsize, aalg);
	}

	return (0);
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
    caddr_t buf, int flags)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * The (de)compression routines need to see the whole payload in
	 * one contiguous buffer, so copy the data out of the request
	 * first.
	 */
	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/*
	 * Copy back the (de)compressed data; m_copyback() extends the
	 * mbuf as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result >= crd->crd_len) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	crypto_copyback(flags, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		adj = result - crd->crd_len;
		if (flags & CRYPTO_F_IMBUF) {
			m_adj((struct mbuf *)buf, adj);
		} else if (flags & CRYPTO_F_IOV) {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}

/*
 * Generate a new software session.
 */
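/*
 * Session IDs index the swcr_sessions array; slot 0 is reserved so that
 * a session ID of zero is never handed out.  The table starts with
 * CRYPTO_SW_SESSIONS entries and is doubled whenever it fills up, with
 * the write lock held across the resize.
 */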
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	rw_wlock(&swcr_sessions_lock);
	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			rw_wunlock(&swcr_sessions_lock);
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions != NULL) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	rw_downgrade(&swcr_sessions_lock);
	swd = &swcr_sessions[i];
	*sid = i;

	while (cri) {
		*swd = malloc(sizeof(struct swcr_data),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (*swd == NULL) {
			swcr_freesession_locked(dev, i);
			rw_runlock(&swcr_sessions_lock);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_AES_XTS:
			txf = &enc_xform_aes_xts;
			goto enccommon;
		case CRYPTO_AES_ICM:
			txf = &enc_xform_aes_icm;
			goto enccommon;
		case CRYPTO_AES_NIST_GCM_16:
			txf = &enc_xform_aes_nist_gcm;
			goto enccommon;
		case CRYPTO_AES_NIST_GMAC:
			txf = &enc_xform_aes_nist_gmac;
			(*swd)->sw_exf = txf;
			break;
		case CRYPTO_CAMELLIA_CBC:
			txf = &enc_xform_camellia;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			if (cri->cri_key != NULL) {
				error = txf->setkey(&((*swd)->sw_kschedule),
				    cri->cri_key, cri->cri_klen / 8);
				if (error) {
					swcr_freesession_locked(dev, i);
					rw_runlock(&swcr_sessions_lock);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(cri->cri_klen / 8,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#ifdef notdef
		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#endif

		case CRYPTO_AES_128_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_128;
			goto auth4common;

		case CRYPTO_AES_192_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_192;
			goto auth4common;

		case CRYPTO_AES_256_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_256;
		auth4common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}
			axf->Init((*swd)->sw_ictx);
			axf->Setkey((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession_locked(dev, i);
			rw_runlock(&swcr_sessions_lock);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	rw_runlock(&swcr_sessions_lock);
	return 0;
}
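/*
 * swcr_sessions_lock protects the session table pointer and its size,
 * not the per-slot contents, so tearing down a single session needs
 * only the read lock; the write lock is taken just for the table
 * resize in swcr_newsession().
 */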
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	int error;

	rw_rlock(&swcr_sessions_lock);
	error = swcr_freesession_locked(dev, tid);
	rw_runlock(&swcr_sessions_lock);
	return error;
}

/*
 * Free a session.
 */
static int
swcr_freesession_locked(device_t dev, u_int64_t tid)
{
	struct swcr_data *swd;
	struct enc_xform *txf;
	struct auth_hash *axf;
	struct comp_algo *cxf;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL)
		return EINVAL;

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx)
				free(swd->sw_ictx, M_CRYPTO_DATA);
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = swd->sw_cxf;
			break;
		}

		free(swd, M_CRYPTO_DATA);
	}
	return 0;
}

/*
 * Process a software request.  Per-descriptor failures are reported
 * through crp->crp_etype; the request is always completed via
 * crypto_done() and the function itself returns zero.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = CRYPTO_SESID2LID(crp->crp_sid);
	rw_rlock(&swcr_sessions_lock);
	if (swcr_sessions == NULL || lid >= swcr_sesnum || lid == 0 ||
	    swcr_sessions[lid] == NULL) {
		rw_runlock(&swcr_sessions_lock);
		crp->crp_etype = ENOENT;
		goto done;
	}
	rw_runlock(&swcr_sessions_lock);

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		rw_rlock(&swcr_sessions_lock);
		if (swcr_sessions == NULL) {
			rw_runlock(&swcr_sessions_lock);
			crp->crp_etype = ENOENT;
			goto done;
		}
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;
		rw_runlock(&swcr_sessions_lock);

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_ICM:
		case CRYPTO_CAMELLIA_CBC:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;

		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			crp->crp_etype = swcr_authenc(crp);
			goto done;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (BUS_PROBE_NOWILDCARD);
}
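/*
 * Register with the crypto framework as a software, synchronous
 * transform provider and advertise every algorithm implemented above.
 */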
"software crypto"); 1198 return (BUS_PROBE_NOWILDCARD); 1199 } 1200 1201 static int 1202 swcr_attach(device_t dev) 1203 { 1204 rw_init(&swcr_sessions_lock, "swcr_sessions_lock"); 1205 memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN); 1206 memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN); 1207 1208 swcr_id = crypto_get_driverid(dev, 1209 CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC); 1210 if (swcr_id < 0) { 1211 device_printf(dev, "cannot initialize!"); 1212 return ENOMEM; 1213 } 1214 #define REGISTER(alg) \ 1215 crypto_register(swcr_id, alg, 0,0) 1216 REGISTER(CRYPTO_DES_CBC); 1217 REGISTER(CRYPTO_3DES_CBC); 1218 REGISTER(CRYPTO_BLF_CBC); 1219 REGISTER(CRYPTO_CAST_CBC); 1220 REGISTER(CRYPTO_SKIPJACK_CBC); 1221 REGISTER(CRYPTO_NULL_CBC); 1222 REGISTER(CRYPTO_MD5_HMAC); 1223 REGISTER(CRYPTO_SHA1_HMAC); 1224 REGISTER(CRYPTO_SHA2_256_HMAC); 1225 REGISTER(CRYPTO_SHA2_384_HMAC); 1226 REGISTER(CRYPTO_SHA2_512_HMAC); 1227 REGISTER(CRYPTO_RIPEMD160_HMAC); 1228 REGISTER(CRYPTO_NULL_HMAC); 1229 REGISTER(CRYPTO_MD5_KPDK); 1230 REGISTER(CRYPTO_SHA1_KPDK); 1231 REGISTER(CRYPTO_MD5); 1232 REGISTER(CRYPTO_SHA1); 1233 REGISTER(CRYPTO_RIJNDAEL128_CBC); 1234 REGISTER(CRYPTO_AES_XTS); 1235 REGISTER(CRYPTO_AES_ICM); 1236 REGISTER(CRYPTO_AES_NIST_GCM_16); 1237 REGISTER(CRYPTO_AES_NIST_GMAC); 1238 REGISTER(CRYPTO_AES_128_NIST_GMAC); 1239 REGISTER(CRYPTO_AES_192_NIST_GMAC); 1240 REGISTER(CRYPTO_AES_256_NIST_GMAC); 1241 REGISTER(CRYPTO_CAMELLIA_CBC); 1242 REGISTER(CRYPTO_DEFLATE_COMP); 1243 #undef REGISTER 1244 1245 return 0; 1246 } 1247 1248 static int 1249 swcr_detach(device_t dev) 1250 { 1251 crypto_unregister_all(swcr_id); 1252 rw_wlock(&swcr_sessions_lock); 1253 free(swcr_sessions, M_CRYPTO_DATA); 1254 swcr_sessions = NULL; 1255 rw_wunlock(&swcr_sessions_lock); 1256 rw_destroy(&swcr_sessions_lock); 1257 return 0; 1258 } 1259 1260 static device_method_t swcr_methods[] = { 1261 DEVMETHOD(device_identify, swcr_identify), 1262 DEVMETHOD(device_probe, swcr_probe), 1263 DEVMETHOD(device_attach, swcr_attach), 1264 DEVMETHOD(device_detach, swcr_detach), 1265 1266 DEVMETHOD(cryptodev_newsession, swcr_newsession), 1267 DEVMETHOD(cryptodev_freesession,swcr_freesession), 1268 DEVMETHOD(cryptodev_process, swcr_process), 1269 1270 {0, 0}, 1271 }; 1272 1273 static driver_t swcr_driver = { 1274 "cryptosoft", 1275 swcr_methods, 1276 0, /* NB: no softc */ 1277 }; 1278 static devclass_t swcr_devclass; 1279 1280 /* 1281 * NB: We explicitly reference the crypto module so we 1282 * get the necessary ordering when built as a loadable 1283 * module. This is required because we bundle the crypto 1284 * module code together with the cryptosoft driver (otherwise 1285 * normal module dependencies would handle things). 1286 */ 1287 extern int crypto_modevent(struct module *, int, void *); 1288 /* XXX where to attach */ 1289 DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0); 1290 MODULE_VERSION(cryptosoft, 1); 1291 MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1); 1292