/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>
#include <sys/md5.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

static	int32_t swcr_id;
static	struct swcr_data **swcr_sessions = NULL;
static	u_int32_t swcr_sesnum;
/* Protects swcr_sessions pointer, not data. */
static	struct rwlock swcr_sessions_lock;

u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];

static	int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_authenc(struct cryptop *crp);
static	int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_freesession(device_t dev, u_int64_t tid);
static	int swcr_freesession_locked(device_t dev, u_int64_t tid);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
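/*
 * A note on the IV conventions used below: CRD_F_IV_EXPLICIT means the
 * caller passed the IV in crd_iv instead of in the data buffer, and
 * CRD_F_IV_PRESENT means the IV already sits in the buffer on
 * encryption, so it must not be written back over the payload.
 */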
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, j, k, blks, ind, count, ivlen;
	struct uio *uio, uiolcl;
	struct iovec iovlcl[4];
	struct iovec *iov;
	int iovcnt, iovalloc;
	int error;

	error = 0;

	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	if (crd->crd_alg == CRYPTO_AES_ICM &&
	    (crd->crd_flags & CRD_F_IV_EXPLICIT) == 0)
		return (EINVAL);

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			arc4rand(iv, ivlen, 0);

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv);
		}
	}

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		int error;

		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));

		error = exf->setkey(&sw->sw_kschedule,
		    crd->crd_key, crd->crd_klen / 8);
		if (error)
			return (error);
	}

	iov = iovlcl;
	iovcnt = nitems(iovlcl);
	iovalloc = 0;
	uio = &uiolcl;
	if ((flags & CRYPTO_F_IMBUF) != 0) {
		error = crypto_mbuftoiov((struct mbuf *)buf, &iov, &iovcnt,
		    &iovalloc);
		if (error)
			return (error);
		uio->uio_iov = iov;
		uio->uio_iovcnt = iovcnt;
	} else if ((flags & CRYPTO_F_IOV) != 0)
		uio = (struct uio *)buf;
	else {
		iov[0].iov_base = buf;
		iov[0].iov_len = crd->crd_skip + crd->crd_len;
		uio->uio_iov = iov;
		uio->uio_iovcnt = 1;
	}

	ivp = iv;

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(sw->sw_kschedule, iv);
	}

	count = crd->crd_skip;
	ind = cuio_getptr(uio, count, &k);
	if (ind == -1) {
		error = EINVAL;
		goto out;
	}

	i = crd->crd_len;

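	/*
	 * Walk the data one cipher block at a time: a block that
	 * straddles an iovec boundary takes the slow path below (copy
	 * out to blk[], process, copy back), while runs that fit within
	 * a single iovec are processed in place in the inner loop.
	 */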
	while (i > 0) {
		/*
		 * If there's insufficient data at the end of
		 * an iovec, we have to do some copying.
		 */
		if (uio->uio_iov[ind].iov_len < k + blks &&
		    uio->uio_iov[ind].iov_len != k) {
			cuio_copydata(uio, count, blks, blk);

			/* Actual encryption/decryption */
			if (exf->reinit) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(sw->sw_kschedule,
					    blk);
				} else {
					exf->decrypt(sw->sw_kschedule,
					    blk);
				}
			} else if (crd->crd_flags & CRD_F_ENCRYPT) {
				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, blk);

				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				bcopy(blk, iv, blks);
				ivp = iv;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(blk, nivp, blks);

				exf->decrypt(sw->sw_kschedule, blk);

				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				ivp = nivp;
			}

			/* Copy back decrypted block */
			cuio_copyback(uio, count, blks, blk);

			count += blks;

			/* Advance pointer */
			ind = cuio_getptr(uio, count, &k);
			if (ind == -1) {
				error = EINVAL;
				goto out;
			}

			i -= blks;

			/* Could be done... */
			if (i == 0)
				break;
		}

		/*
		 * Warning: idat may point to garbage here, but
		 * we only use it in the while() loop, only if
		 * there are indeed enough data.
		 */
		idat = (char *)uio->uio_iov[ind].iov_base + k;

		while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) {
			if (exf->reinit) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(sw->sw_kschedule,
					    idat);
				} else {
					exf->decrypt(sw->sw_kschedule,
					    idat);
				}
			} else if (crd->crd_flags & CRD_F_ENCRYPT) {
				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, idat);
				ivp = idat;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block to be used
				 * in next block's processing.
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(idat, nivp, blks);

				exf->decrypt(sw->sw_kschedule, idat);

				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				ivp = nivp;
			}

			idat += blks;
			count += blks;
			k += blks;
			i -= blks;
		}

		/*
		 * Advance to the next iov if the end of the current iov
		 * is aligned with the end of a cipher block.
		 * Note that the code is equivalent to calling:
		 *	ind = cuio_getptr(uio, count, &k);
		 */
		if (i > 0 && k == uio->uio_iov[ind].iov_len) {
			k = 0;
			ind++;
			if (ind >= uio->uio_iovcnt) {
				error = EINVAL;
				goto out;
			}
		}
	}

out:
	if (iovalloc)
		free(iov, M_CRYPTO_DATA);

	return (error);
}
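/*
 * The HMAC code below implements the standard construction
 * H((K ^ opad) || H((K ^ ipad) || message)): sw_ictx is primed with the
 * ipad-masked key and sw_octx with the opad-masked key, so per-request
 * work only has to hash the message and run the short outer pass.
 */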
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
    int klen)
{
	int k;

	klen /= 8;

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/*
		 * We need a buffer that can hold an md5 and a sha1 result
		 * just to throw it away.
		 * What we do here is the initial part of:
		 *   ALGO( key, keyfill, .. )
		 * adding the key to sw_ictx and abusing Final() to get the
		 * "keyfill" padding.
		 * In addition we abuse the sw_octx to save the key to have
		 * it to be able to append it at the end in swcr_authcompute().
		 */
		u_char buf[SHA1_RESULTLEN];

		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	default:
		printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
		    "doesn't use keys.\n", __func__, axf->type);
	}
}

/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == 0)
		return EINVAL;

	axf = sw->sw_axf;

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		/* If we have no key saved, return error. */
		if (sw->sw_octx == NULL)
			return EINVAL;

		/*
		 * Add the trailing copy of the key (see comment in
		 * swcr_authprepare()) after the data:
		 *   ALGO( .., key, algofill )
		 * and let Final() do the proper, natural "algofill"
		 * padding.
		 */
		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}
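/*
 * NB: sessions may request a truncated authenticator via cri_mlen (kept
 * in sw_mlen); sw_mlen == 0 means the transform's full hash length is
 * injected.
 */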
CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */

/*
 * Apply a combined encryption-authentication transformation
 */
static int
swcr_authenc(struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe = NULL;
	struct auth_hash *axf = NULL;
	struct enc_xform *exf = NULL;
	caddr_t buf = (caddr_t)crp->crp_buf;
	uint32_t *blkp;
	int aadlen, blksz, i, ivlen, len, iskip, oskip, r;

	ivlen = blksz = iskip = oskip = 0;

	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		     sw && sw->sw_alg != crd->crd_alg;
		     sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_NIST_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = 12;
			break;
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == 0)
				return (EINVAL);
			bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
			blksz = axf->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	if (crde == NULL || crda == NULL)
		return (EINVAL);

	if (crde->crd_alg == CRYPTO_AES_NIST_GCM_16 &&
	    (crde->crd_flags & CRD_F_IV_EXPLICIT) == 0)
		return (EINVAL);

	if (crde->crd_klen != crda->crd_klen)
		return (EINVAL);

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else
			arc4rand(iv, ivlen, 0);

		/* Do we need to write the IV */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(crp->crp_flags, buf, crde->crd_inject,
			    ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(crp->crp_flags, buf, crde->crd_inject,
			    ivlen, iv);
		}
	}

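	/*
	 * From here the GCM/GMAC tag is built in the order the mode
	 * requires: the 96-bit nonce primes the hash context, the AAD is
	 * hashed in block-sized chunks, the payload is processed, and
	 * finally a length block encoding the AAD and payload bit
	 * lengths is folded in.
	 */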
	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	aadlen = crda->crd_len;

	for (i = iskip; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz - oskip);
		crypto_copydata(crp->crp_flags, buf, crda->crd_skip + i, len,
		    blk + oskip);
		bzero(blk + len + oskip, blksz - len - oskip);
		axf->Update(&ctx, blk, blksz);
		oskip = 0; /* reset initial output offset */
	}

	if (exf->reinit)
		exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crde->crd_len; i += blksz) {
		len = MIN(crde->crd_len - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crp->crp_flags, buf, crde->crd_skip + i, len,
		    blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
			crypto_copyback(crp->crp_flags, buf,
			    crde->crd_skip + i, len, blk);
		} else {
			axf->Update(&ctx, blk, len);
		}
	}

	/* Do any required special finalization */
	switch (crda->crd_alg) {
	case CRYPTO_AES_128_NIST_GMAC:
	case CRYPTO_AES_192_NIST_GMAC:
	case CRYPTO_AES_256_NIST_GMAC:
		/* length block */
		bzero(blk, blksz);
		blkp = (uint32_t *)blk + 1;
		*blkp = htobe32(aadlen * 8);
		blkp = (uint32_t *)blk + 3;
		*blkp = htobe32(crde->crd_len * 8);
		axf->Update(&ctx, blk, blksz);
		break;
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

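	/*
	 * On decryption the tag is compared (in constant time) before
	 * any plaintext is written back, so a forged message fails with
	 * EBADMSG without ever releasing decrypted data; only after the
	 * tag matches is the payload decrypted in a second pass.
	 */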
	/* Validate tag */
	if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
		crypto_copydata(crp->crp_flags, buf, crda->crd_inject,
		    axf->hashsize, uaalg);

		r = timingsafe_bcmp(aalg, uaalg, axf->hashsize);
		if (r == 0) {
			/* tag matches, decrypt data */
			for (i = 0; i < crde->crd_len; i += blksz) {
				len = MIN(crde->crd_len - i, blksz);
				if (len < blksz)
					bzero(blk, blksz);
				crypto_copydata(crp->crp_flags, buf,
				    crde->crd_skip + i, len, blk);
				exf->decrypt(swe->sw_kschedule, blk);
				crypto_copyback(crp->crp_flags, buf,
				    crde->crd_skip + i, len, blk);
			}
		} else
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp->crp_flags, buf, crda->crd_inject,
		    axf->hashsize, aalg);
	}

	return (0);
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
    caddr_t buf, int flags)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * The (de)compression transforms operate on a single contiguous
	 * buffer, so gather the whole region out of the mbuf/iovec chain
	 * first.
	 */
	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/*
	 * Copy back the (de)compressed data. m_copyback is
	 * extending the mbuf as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result >= crd->crd_len) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	crypto_copyback(flags, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		adj = result - crd->crd_len;
		if (flags & CRYPTO_F_IMBUF)
			m_adj((struct mbuf *)buf, adj);
		else if (flags & CRYPTO_F_IOV) {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}
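/*
 * NB: when compression would not shrink the data (result >= crd_len)
 * the buffer is left unmodified; the transform's output length is
 * still recorded in sw_size, which swcr_process() copies into crp_olen.
 */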
/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int len;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	rw_wlock(&swcr_sessions_lock);
	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			rw_wunlock(&swcr_sessions_lock);
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions != NULL) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	rw_downgrade(&swcr_sessions_lock);
	swd = &swcr_sessions[i];
	*sid = i;

	while (cri) {
		*swd = malloc(sizeof(struct swcr_data),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (*swd == NULL) {
			swcr_freesession_locked(dev, i);
			rw_runlock(&swcr_sessions_lock);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_AES_XTS:
			txf = &enc_xform_aes_xts;
			goto enccommon;
		case CRYPTO_AES_ICM:
			txf = &enc_xform_aes_icm;
			goto enccommon;
		case CRYPTO_AES_NIST_GCM_16:
			txf = &enc_xform_aes_nist_gcm;
			goto enccommon;
		case CRYPTO_AES_NIST_GMAC:
			txf = &enc_xform_aes_nist_gmac;
			(*swd)->sw_exf = txf;
			break;
		case CRYPTO_CAMELLIA_CBC:
			txf = &enc_xform_camellia;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			if (cri->cri_key != NULL) {
				error = txf->setkey(&((*swd)->sw_kschedule),
				    cri->cri_key, cri->cri_klen / 8);
				if (error) {
					swcr_freesession_locked(dev, i);
					rw_runlock(&swcr_sessions_lock);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(cri->cri_klen / 8,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#ifdef notdef
		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#endif

		case CRYPTO_AES_128_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_128;
			goto auth4common;

		case CRYPTO_AES_192_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_192;
			goto auth4common;

		case CRYPTO_AES_256_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_256;
		auth4common:
			len = cri->cri_klen / 8;
			if (len != 16 && len != 24 && len != 32) {
				/* Only AES-128/192/256 keys are valid. */
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return EINVAL;
			}

			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}
			axf->Init((*swd)->sw_ictx);
			axf->Setkey((*swd)->sw_ictx, cri->cri_key, len);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession_locked(dev, i);
			rw_runlock(&swcr_sessions_lock);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	rw_runlock(&swcr_sessions_lock);
	return 0;
}

static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	int error;

	rw_rlock(&swcr_sessions_lock);
	error = swcr_freesession_locked(dev, tid);
	rw_runlock(&swcr_sessions_lock);
	return error;
}

/*
 * Free a session.
 */
static int
swcr_freesession_locked(device_t dev, u_int64_t tid)
{
	struct swcr_data *swd;
	struct enc_xform *txf;
	struct auth_hash *axf;
	struct comp_algo *cxf;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL)
		return EINVAL;

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx)
				free(swd->sw_ictx, M_CRYPTO_DATA);
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = swd->sw_cxf;
			break;
		}

		free(swd, M_CRYPTO_DATA);
	}
	return 0;
}
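/*
 * NB: in swcr_process() below, per-request errors are reported through
 * crp->crp_etype and the request is always completed via crypto_done();
 * the function's own return value is non-zero only for a NULL request.
 */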
/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = CRYPTO_SESID2LID(crp->crp_sid);
	rw_rlock(&swcr_sessions_lock);
	if (swcr_sessions == NULL || lid >= swcr_sesnum || lid == 0 ||
	    swcr_sessions[lid] == NULL) {
		rw_runlock(&swcr_sessions_lock);
		crp->crp_etype = ENOENT;
		goto done;
	}
	rw_runlock(&swcr_sessions_lock);

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		rw_rlock(&swcr_sessions_lock);
		if (swcr_sessions == NULL) {
			rw_runlock(&swcr_sessions_lock);
			crp->crp_etype = ENOENT;
			goto done;
		}
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;
		rw_runlock(&swcr_sessions_lock);

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_ICM:
		case CRYPTO_CAMELLIA_CBC:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;

		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			crp->crp_etype = swcr_authenc(crp);
			goto done;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}
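/*
 * NB: BUS_PROBE_NOWILDCARD keeps this driver from attaching to anything
 * except the "cryptosoft" child that swcr_identify() adds above.
 */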
"software crypto"); 1202 return (BUS_PROBE_NOWILDCARD); 1203 } 1204 1205 static int 1206 swcr_attach(device_t dev) 1207 { 1208 rw_init(&swcr_sessions_lock, "swcr_sessions_lock"); 1209 memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN); 1210 memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN); 1211 1212 swcr_id = crypto_get_driverid(dev, 1213 CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC); 1214 if (swcr_id < 0) { 1215 device_printf(dev, "cannot initialize!"); 1216 return ENOMEM; 1217 } 1218 #define REGISTER(alg) \ 1219 crypto_register(swcr_id, alg, 0,0) 1220 REGISTER(CRYPTO_DES_CBC); 1221 REGISTER(CRYPTO_3DES_CBC); 1222 REGISTER(CRYPTO_BLF_CBC); 1223 REGISTER(CRYPTO_CAST_CBC); 1224 REGISTER(CRYPTO_SKIPJACK_CBC); 1225 REGISTER(CRYPTO_NULL_CBC); 1226 REGISTER(CRYPTO_MD5_HMAC); 1227 REGISTER(CRYPTO_SHA1_HMAC); 1228 REGISTER(CRYPTO_SHA2_256_HMAC); 1229 REGISTER(CRYPTO_SHA2_384_HMAC); 1230 REGISTER(CRYPTO_SHA2_512_HMAC); 1231 REGISTER(CRYPTO_RIPEMD160_HMAC); 1232 REGISTER(CRYPTO_NULL_HMAC); 1233 REGISTER(CRYPTO_MD5_KPDK); 1234 REGISTER(CRYPTO_SHA1_KPDK); 1235 REGISTER(CRYPTO_MD5); 1236 REGISTER(CRYPTO_SHA1); 1237 REGISTER(CRYPTO_RIJNDAEL128_CBC); 1238 REGISTER(CRYPTO_AES_XTS); 1239 REGISTER(CRYPTO_AES_ICM); 1240 REGISTER(CRYPTO_AES_NIST_GCM_16); 1241 REGISTER(CRYPTO_AES_NIST_GMAC); 1242 REGISTER(CRYPTO_AES_128_NIST_GMAC); 1243 REGISTER(CRYPTO_AES_192_NIST_GMAC); 1244 REGISTER(CRYPTO_AES_256_NIST_GMAC); 1245 REGISTER(CRYPTO_CAMELLIA_CBC); 1246 REGISTER(CRYPTO_DEFLATE_COMP); 1247 #undef REGISTER 1248 1249 return 0; 1250 } 1251 1252 static int 1253 swcr_detach(device_t dev) 1254 { 1255 crypto_unregister_all(swcr_id); 1256 rw_wlock(&swcr_sessions_lock); 1257 free(swcr_sessions, M_CRYPTO_DATA); 1258 swcr_sessions = NULL; 1259 rw_wunlock(&swcr_sessions_lock); 1260 rw_destroy(&swcr_sessions_lock); 1261 return 0; 1262 } 1263 1264 static device_method_t swcr_methods[] = { 1265 DEVMETHOD(device_identify, swcr_identify), 1266 DEVMETHOD(device_probe, swcr_probe), 1267 DEVMETHOD(device_attach, swcr_attach), 1268 DEVMETHOD(device_detach, swcr_detach), 1269 1270 DEVMETHOD(cryptodev_newsession, swcr_newsession), 1271 DEVMETHOD(cryptodev_freesession,swcr_freesession), 1272 DEVMETHOD(cryptodev_process, swcr_process), 1273 1274 {0, 0}, 1275 }; 1276 1277 static driver_t swcr_driver = { 1278 "cryptosoft", 1279 swcr_methods, 1280 0, /* NB: no softc */ 1281 }; 1282 static devclass_t swcr_devclass; 1283 1284 /* 1285 * NB: We explicitly reference the crypto module so we 1286 * get the necessary ordering when built as a loadable 1287 * module. This is required because we bundle the crypto 1288 * module code together with the cryptosoft driver (otherwise 1289 * normal module dependencies would handle things). 1290 */ 1291 extern int crypto_modevent(struct module *, int, void *); 1292 /* XXX where to attach */ 1293 DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0); 1294 MODULE_VERSION(cryptosoft, 1); 1295 MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1); 1296