1 /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */ 2 3 /*- 4 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) 5 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting 6 * 7 * This code was written by Angelos D. Keromytis in Athens, Greece, in 8 * February 2000. Network Security Technologies Inc. (NSTI) kindly 9 * supported the development of this code. 10 * 11 * Copyright (c) 2000, 2001 Angelos D. Keromytis 12 * 13 * Permission to use, copy, and modify this software with or without fee 14 * is hereby granted, provided that this entire notice is included in 15 * all source code copies of any software which is or includes a copy or 16 * modification of this software. 17 * 18 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR 19 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY 20 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE 21 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR 22 * PURPOSE. 23 */ 24 25 #include <sys/cdefs.h> 26 __FBSDID("$FreeBSD$"); 27 28 #include <sys/param.h> 29 #include <sys/systm.h> 30 #include <sys/malloc.h> 31 #include <sys/mbuf.h> 32 #include <sys/module.h> 33 #include <sys/sysctl.h> 34 #include <sys/errno.h> 35 #include <sys/random.h> 36 #include <sys/kernel.h> 37 #include <sys/uio.h> 38 #include <sys/lock.h> 39 #include <sys/rwlock.h> 40 41 #include <crypto/blowfish/blowfish.h> 42 #include <crypto/sha1.h> 43 #include <opencrypto/rmd160.h> 44 #include <opencrypto/cast.h> 45 #include <opencrypto/skipjack.h> 46 #include <sys/md5.h> 47 48 #include <opencrypto/cryptodev.h> 49 #include <opencrypto/cryptosoft.h> 50 #include <opencrypto/xform.h> 51 52 #include <sys/kobj.h> 53 #include <sys/bus.h> 54 #include "cryptodev_if.h" 55 56 static int32_t swcr_id; 57 static struct swcr_data **swcr_sessions = NULL; 58 static u_int32_t swcr_sesnum; 59 /* Protects swcr_sessions pointer, not data. */ 60 static struct rwlock swcr_sessions_lock; 61 62 u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN]; 63 u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN]; 64 65 static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int); 66 static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int); 67 static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int); 68 static int swcr_freesession(device_t dev, u_int64_t tid); 69 static int swcr_freesession_locked(device_t dev, u_int64_t tid); 70 71 /* 72 * Apply a symmetric encryption/decryption algorithm. 73 */ 74 static int 75 swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf, 76 int flags) 77 { 78 unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat; 79 unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN]; 80 struct enc_xform *exf; 81 int i, k, j, blks; 82 83 exf = sw->sw_exf; 84 blks = exf->blocksize; 85 86 /* Check for non-padded data */ 87 if (crd->crd_len % blks) 88 return EINVAL; 89 90 /* Initialize the IV */ 91 if (crd->crd_flags & CRD_F_ENCRYPT) { 92 /* IV explicitly provided ? */ 93 if (crd->crd_flags & CRD_F_IV_EXPLICIT) 94 bcopy(crd->crd_iv, iv, blks); 95 else 96 arc4rand(iv, blks, 0); 97 98 /* Do we need to write the IV */ 99 if (!(crd->crd_flags & CRD_F_IV_PRESENT)) 100 crypto_copyback(flags, buf, crd->crd_inject, blks, iv); 101 102 } else { /* Decryption */ 103 /* IV explicitly provided ? */ 104 if (crd->crd_flags & CRD_F_IV_EXPLICIT) 105 bcopy(crd->crd_iv, iv, blks); 106 else { 107 /* Get IV off buf */ 108 crypto_copydata(flags, buf, crd->crd_inject, blks, iv); 109 } 110 } 111 112 if (crd->crd_flags & CRD_F_KEY_EXPLICIT) { 113 int error; 114 115 if (sw->sw_kschedule) 116 exf->zerokey(&(sw->sw_kschedule)); 117 error = exf->setkey(&sw->sw_kschedule, 118 crd->crd_key, crd->crd_klen / 8); 119 if (error) 120 return (error); 121 } 122 123 ivp = iv; 124 125 /* 126 * xforms that provide a reinit method perform all IV 127 * handling themselves. 128 */ 129 if (exf->reinit) 130 exf->reinit(sw->sw_kschedule, iv); 131 132 if (flags & CRYPTO_F_IMBUF) { 133 struct mbuf *m = (struct mbuf *) buf; 134 135 /* Find beginning of data */ 136 m = m_getptr(m, crd->crd_skip, &k); 137 if (m == NULL) 138 return EINVAL; 139 140 i = crd->crd_len; 141 142 while (i > 0) { 143 /* 144 * If there's insufficient data at the end of 145 * an mbuf, we have to do some copying. 146 */ 147 if (m->m_len < k + blks && m->m_len != k) { 148 m_copydata(m, k, blks, blk); 149 150 /* Actual encryption/decryption */ 151 if (exf->reinit) { 152 if (crd->crd_flags & CRD_F_ENCRYPT) { 153 exf->encrypt(sw->sw_kschedule, 154 blk); 155 } else { 156 exf->decrypt(sw->sw_kschedule, 157 blk); 158 } 159 } else if (crd->crd_flags & CRD_F_ENCRYPT) { 160 /* XOR with previous block */ 161 for (j = 0; j < blks; j++) 162 blk[j] ^= ivp[j]; 163 164 exf->encrypt(sw->sw_kschedule, blk); 165 166 /* 167 * Keep encrypted block for XOR'ing 168 * with next block 169 */ 170 bcopy(blk, iv, blks); 171 ivp = iv; 172 } else { /* decrypt */ 173 /* 174 * Keep encrypted block for XOR'ing 175 * with next block 176 */ 177 if (ivp == iv) 178 bcopy(blk, piv, blks); 179 else 180 bcopy(blk, iv, blks); 181 182 exf->decrypt(sw->sw_kschedule, blk); 183 184 /* XOR with previous block */ 185 for (j = 0; j < blks; j++) 186 blk[j] ^= ivp[j]; 187 188 if (ivp == iv) 189 bcopy(piv, iv, blks); 190 else 191 ivp = iv; 192 } 193 194 /* Copy back decrypted block */ 195 m_copyback(m, k, blks, blk); 196 197 /* Advance pointer */ 198 m = m_getptr(m, k + blks, &k); 199 if (m == NULL) 200 return EINVAL; 201 202 i -= blks; 203 204 /* Could be done... */ 205 if (i == 0) 206 break; 207 } 208 209 /* Skip possibly empty mbufs */ 210 if (k == m->m_len) { 211 for (m = m->m_next; m && m->m_len == 0; 212 m = m->m_next) 213 ; 214 k = 0; 215 } 216 217 /* Sanity check */ 218 if (m == NULL) 219 return EINVAL; 220 221 /* 222 * Warning: idat may point to garbage here, but 223 * we only use it in the while() loop, only if 224 * there are indeed enough data. 225 */ 226 idat = mtod(m, unsigned char *) + k; 227 228 while (m->m_len >= k + blks && i > 0) { 229 if (exf->reinit) { 230 if (crd->crd_flags & CRD_F_ENCRYPT) { 231 exf->encrypt(sw->sw_kschedule, 232 idat); 233 } else { 234 exf->decrypt(sw->sw_kschedule, 235 idat); 236 } 237 } else if (crd->crd_flags & CRD_F_ENCRYPT) { 238 /* XOR with previous block/IV */ 239 for (j = 0; j < blks; j++) 240 idat[j] ^= ivp[j]; 241 242 exf->encrypt(sw->sw_kschedule, idat); 243 ivp = idat; 244 } else { /* decrypt */ 245 /* 246 * Keep encrypted block to be used 247 * in next block's processing. 248 */ 249 if (ivp == iv) 250 bcopy(idat, piv, blks); 251 else 252 bcopy(idat, iv, blks); 253 254 exf->decrypt(sw->sw_kschedule, idat); 255 256 /* XOR with previous block/IV */ 257 for (j = 0; j < blks; j++) 258 idat[j] ^= ivp[j]; 259 260 if (ivp == iv) 261 bcopy(piv, iv, blks); 262 else 263 ivp = iv; 264 } 265 266 idat += blks; 267 k += blks; 268 i -= blks; 269 } 270 } 271 272 return 0; /* Done with mbuf encryption/decryption */ 273 } else if (flags & CRYPTO_F_IOV) { 274 struct uio *uio = (struct uio *) buf; 275 struct iovec *iov; 276 277 /* Find beginning of data */ 278 iov = cuio_getptr(uio, crd->crd_skip, &k); 279 if (iov == NULL) 280 return EINVAL; 281 282 i = crd->crd_len; 283 284 while (i > 0) { 285 /* 286 * If there's insufficient data at the end of 287 * an iovec, we have to do some copying. 288 */ 289 if (iov->iov_len < k + blks && iov->iov_len != k) { 290 cuio_copydata(uio, k, blks, blk); 291 292 /* Actual encryption/decryption */ 293 if (exf->reinit) { 294 if (crd->crd_flags & CRD_F_ENCRYPT) { 295 exf->encrypt(sw->sw_kschedule, 296 blk); 297 } else { 298 exf->decrypt(sw->sw_kschedule, 299 blk); 300 } 301 } else if (crd->crd_flags & CRD_F_ENCRYPT) { 302 /* XOR with previous block */ 303 for (j = 0; j < blks; j++) 304 blk[j] ^= ivp[j]; 305 306 exf->encrypt(sw->sw_kschedule, blk); 307 308 /* 309 * Keep encrypted block for XOR'ing 310 * with next block 311 */ 312 bcopy(blk, iv, blks); 313 ivp = iv; 314 } else { /* decrypt */ 315 /* 316 * Keep encrypted block for XOR'ing 317 * with next block 318 */ 319 if (ivp == iv) 320 bcopy(blk, piv, blks); 321 else 322 bcopy(blk, iv, blks); 323 324 exf->decrypt(sw->sw_kschedule, blk); 325 326 /* XOR with previous block */ 327 for (j = 0; j < blks; j++) 328 blk[j] ^= ivp[j]; 329 330 if (ivp == iv) 331 bcopy(piv, iv, blks); 332 else 333 ivp = iv; 334 } 335 336 /* Copy back decrypted block */ 337 cuio_copyback(uio, k, blks, blk); 338 339 /* Advance pointer */ 340 iov = cuio_getptr(uio, k + blks, &k); 341 if (iov == NULL) 342 return EINVAL; 343 344 i -= blks; 345 346 /* Could be done... */ 347 if (i == 0) 348 break; 349 } 350 351 /* 352 * Warning: idat may point to garbage here, but 353 * we only use it in the while() loop, only if 354 * there are indeed enough data. 355 */ 356 idat = (char *)iov->iov_base + k; 357 358 while (iov->iov_len >= k + blks && i > 0) { 359 if (exf->reinit) { 360 if (crd->crd_flags & CRD_F_ENCRYPT) { 361 exf->encrypt(sw->sw_kschedule, 362 idat); 363 } else { 364 exf->decrypt(sw->sw_kschedule, 365 idat); 366 } 367 } else if (crd->crd_flags & CRD_F_ENCRYPT) { 368 /* XOR with previous block/IV */ 369 for (j = 0; j < blks; j++) 370 idat[j] ^= ivp[j]; 371 372 exf->encrypt(sw->sw_kschedule, idat); 373 ivp = idat; 374 } else { /* decrypt */ 375 /* 376 * Keep encrypted block to be used 377 * in next block's processing. 378 */ 379 if (ivp == iv) 380 bcopy(idat, piv, blks); 381 else 382 bcopy(idat, iv, blks); 383 384 exf->decrypt(sw->sw_kschedule, idat); 385 386 /* XOR with previous block/IV */ 387 for (j = 0; j < blks; j++) 388 idat[j] ^= ivp[j]; 389 390 if (ivp == iv) 391 bcopy(piv, iv, blks); 392 else 393 ivp = iv; 394 } 395 396 idat += blks; 397 k += blks; 398 i -= blks; 399 } 400 if (k == iov->iov_len) { 401 iov++; 402 k = 0; 403 } 404 } 405 406 return 0; /* Done with iovec encryption/decryption */ 407 } else { /* contiguous buffer */ 408 if (exf->reinit) { 409 for (i = crd->crd_skip; 410 i < crd->crd_skip + crd->crd_len; i += blks) { 411 if (crd->crd_flags & CRD_F_ENCRYPT) 412 exf->encrypt(sw->sw_kschedule, buf + i); 413 else 414 exf->decrypt(sw->sw_kschedule, buf + i); 415 } 416 } else if (crd->crd_flags & CRD_F_ENCRYPT) { 417 for (i = crd->crd_skip; 418 i < crd->crd_skip + crd->crd_len; i += blks) { 419 /* XOR with the IV/previous block, as appropriate. */ 420 if (i == crd->crd_skip) 421 for (k = 0; k < blks; k++) 422 buf[i + k] ^= ivp[k]; 423 else 424 for (k = 0; k < blks; k++) 425 buf[i + k] ^= buf[i + k - blks]; 426 exf->encrypt(sw->sw_kschedule, buf + i); 427 } 428 } else { /* Decrypt */ 429 /* 430 * Start at the end, so we don't need to keep the encrypted 431 * block as the IV for the next block. 432 */ 433 for (i = crd->crd_skip + crd->crd_len - blks; 434 i >= crd->crd_skip; i -= blks) { 435 exf->decrypt(sw->sw_kschedule, buf + i); 436 437 /* XOR with the IV/previous block, as appropriate */ 438 if (i == crd->crd_skip) 439 for (k = 0; k < blks; k++) 440 buf[i + k] ^= ivp[k]; 441 else 442 for (k = 0; k < blks; k++) 443 buf[i + k] ^= buf[i + k - blks]; 444 } 445 } 446 447 return 0; /* Done with contiguous buffer encryption/decryption */ 448 } 449 450 /* Unreachable */ 451 return EINVAL; 452 } 453 454 static void 455 swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key, 456 int klen) 457 { 458 int k; 459 460 klen /= 8; 461 462 switch (axf->type) { 463 case CRYPTO_MD5_HMAC: 464 case CRYPTO_SHA1_HMAC: 465 case CRYPTO_SHA2_256_HMAC: 466 case CRYPTO_SHA2_384_HMAC: 467 case CRYPTO_SHA2_512_HMAC: 468 case CRYPTO_NULL_HMAC: 469 case CRYPTO_RIPEMD160_HMAC: 470 for (k = 0; k < klen; k++) 471 key[k] ^= HMAC_IPAD_VAL; 472 473 axf->Init(sw->sw_ictx); 474 axf->Update(sw->sw_ictx, key, klen); 475 axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen); 476 477 for (k = 0; k < klen; k++) 478 key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL); 479 480 axf->Init(sw->sw_octx); 481 axf->Update(sw->sw_octx, key, klen); 482 axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen); 483 484 for (k = 0; k < klen; k++) 485 key[k] ^= HMAC_OPAD_VAL; 486 break; 487 case CRYPTO_MD5_KPDK: 488 case CRYPTO_SHA1_KPDK: 489 { 490 /* 491 * We need a buffer that can hold an md5 and a sha1 result 492 * just to throw it away. 493 * What we do here is the initial part of: 494 * ALGO( key, keyfill, .. ) 495 * adding the key to sw_ictx and abusing Final() to get the 496 * "keyfill" padding. 497 * In addition we abuse the sw_octx to save the key to have 498 * it to be able to append it at the end in swcr_authcompute(). 499 */ 500 u_char buf[SHA1_RESULTLEN]; 501 502 sw->sw_klen = klen; 503 bcopy(key, sw->sw_octx, klen); 504 axf->Init(sw->sw_ictx); 505 axf->Update(sw->sw_ictx, key, klen); 506 axf->Final(buf, sw->sw_ictx); 507 break; 508 } 509 default: 510 printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d " 511 "doesn't use keys.\n", __func__, axf->type); 512 } 513 } 514 515 /* 516 * Compute keyed-hash authenticator. 517 */ 518 static int 519 swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf, 520 int flags) 521 { 522 unsigned char aalg[HASH_MAX_LEN]; 523 struct auth_hash *axf; 524 union authctx ctx; 525 int err; 526 527 if (sw->sw_ictx == 0) 528 return EINVAL; 529 530 axf = sw->sw_axf; 531 532 if (crd->crd_flags & CRD_F_KEY_EXPLICIT) 533 swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen); 534 535 bcopy(sw->sw_ictx, &ctx, axf->ctxsize); 536 537 err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len, 538 (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx); 539 if (err) 540 return err; 541 542 switch (sw->sw_alg) { 543 case CRYPTO_MD5_HMAC: 544 case CRYPTO_SHA1_HMAC: 545 case CRYPTO_SHA2_256_HMAC: 546 case CRYPTO_SHA2_384_HMAC: 547 case CRYPTO_SHA2_512_HMAC: 548 case CRYPTO_RIPEMD160_HMAC: 549 if (sw->sw_octx == NULL) 550 return EINVAL; 551 552 axf->Final(aalg, &ctx); 553 bcopy(sw->sw_octx, &ctx, axf->ctxsize); 554 axf->Update(&ctx, aalg, axf->hashsize); 555 axf->Final(aalg, &ctx); 556 break; 557 558 case CRYPTO_MD5_KPDK: 559 case CRYPTO_SHA1_KPDK: 560 /* If we have no key saved, return error. */ 561 if (sw->sw_octx == NULL) 562 return EINVAL; 563 564 /* 565 * Add the trailing copy of the key (see comment in 566 * swcr_authprepare()) after the data: 567 * ALGO( .., key, algofill ) 568 * and let Final() do the proper, natural "algofill" 569 * padding. 570 */ 571 axf->Update(&ctx, sw->sw_octx, sw->sw_klen); 572 axf->Final(aalg, &ctx); 573 break; 574 575 case CRYPTO_NULL_HMAC: 576 axf->Final(aalg, &ctx); 577 break; 578 } 579 580 /* Inject the authentication data */ 581 crypto_copyback(flags, buf, crd->crd_inject, 582 sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg); 583 return 0; 584 } 585 586 /* 587 * Apply a compression/decompression algorithm 588 */ 589 static int 590 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw, 591 caddr_t buf, int flags) 592 { 593 u_int8_t *data, *out; 594 struct comp_algo *cxf; 595 int adj; 596 u_int32_t result; 597 598 cxf = sw->sw_cxf; 599 600 /* We must handle the whole buffer of data in one time 601 * then if there is not all the data in the mbuf, we must 602 * copy in a buffer. 603 */ 604 605 data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT); 606 if (data == NULL) 607 return (EINVAL); 608 crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data); 609 610 if (crd->crd_flags & CRD_F_COMP) 611 result = cxf->compress(data, crd->crd_len, &out); 612 else 613 result = cxf->decompress(data, crd->crd_len, &out); 614 615 free(data, M_CRYPTO_DATA); 616 if (result == 0) 617 return EINVAL; 618 619 /* Copy back the (de)compressed data. m_copyback is 620 * extending the mbuf as necessary. 621 */ 622 sw->sw_size = result; 623 /* Check the compressed size when doing compression */ 624 if (crd->crd_flags & CRD_F_COMP) { 625 if (result >= crd->crd_len) { 626 /* Compression was useless, we lost time */ 627 free(out, M_CRYPTO_DATA); 628 return 0; 629 } 630 } 631 632 crypto_copyback(flags, buf, crd->crd_skip, result, out); 633 if (result < crd->crd_len) { 634 adj = result - crd->crd_len; 635 if (flags & CRYPTO_F_IMBUF) { 636 adj = result - crd->crd_len; 637 m_adj((struct mbuf *)buf, adj); 638 } else if (flags & CRYPTO_F_IOV) { 639 struct uio *uio = (struct uio *)buf; 640 int ind; 641 642 adj = crd->crd_len - result; 643 ind = uio->uio_iovcnt - 1; 644 645 while (adj > 0 && ind >= 0) { 646 if (adj < uio->uio_iov[ind].iov_len) { 647 uio->uio_iov[ind].iov_len -= adj; 648 break; 649 } 650 651 adj -= uio->uio_iov[ind].iov_len; 652 uio->uio_iov[ind].iov_len = 0; 653 ind--; 654 uio->uio_iovcnt--; 655 } 656 } 657 } 658 free(out, M_CRYPTO_DATA); 659 return 0; 660 } 661 662 /* 663 * Generate a new software session. 664 */ 665 static int 666 swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri) 667 { 668 struct swcr_data **swd; 669 struct auth_hash *axf; 670 struct enc_xform *txf; 671 struct comp_algo *cxf; 672 u_int32_t i; 673 int error; 674 675 if (sid == NULL || cri == NULL) 676 return EINVAL; 677 678 rw_wlock(&swcr_sessions_lock); 679 if (swcr_sessions) { 680 for (i = 1; i < swcr_sesnum; i++) 681 if (swcr_sessions[i] == NULL) 682 break; 683 } else 684 i = 1; /* NB: to silence compiler warning */ 685 686 if (swcr_sessions == NULL || i == swcr_sesnum) { 687 if (swcr_sessions == NULL) { 688 i = 1; /* We leave swcr_sessions[0] empty */ 689 swcr_sesnum = CRYPTO_SW_SESSIONS; 690 } else 691 swcr_sesnum *= 2; 692 693 swd = malloc(swcr_sesnum * sizeof(struct swcr_data *), 694 M_CRYPTO_DATA, M_NOWAIT|M_ZERO); 695 if (swd == NULL) { 696 /* Reset session number */ 697 if (swcr_sesnum == CRYPTO_SW_SESSIONS) 698 swcr_sesnum = 0; 699 else 700 swcr_sesnum /= 2; 701 rw_wunlock(&swcr_sessions_lock); 702 return ENOBUFS; 703 } 704 705 /* Copy existing sessions */ 706 if (swcr_sessions != NULL) { 707 bcopy(swcr_sessions, swd, 708 (swcr_sesnum / 2) * sizeof(struct swcr_data *)); 709 free(swcr_sessions, M_CRYPTO_DATA); 710 } 711 712 swcr_sessions = swd; 713 } 714 715 rw_downgrade(&swcr_sessions_lock); 716 swd = &swcr_sessions[i]; 717 *sid = i; 718 719 while (cri) { 720 *swd = malloc(sizeof(struct swcr_data), 721 M_CRYPTO_DATA, M_NOWAIT|M_ZERO); 722 if (*swd == NULL) { 723 swcr_freesession_locked(dev, i); 724 rw_runlock(&swcr_sessions_lock); 725 return ENOBUFS; 726 } 727 728 switch (cri->cri_alg) { 729 case CRYPTO_DES_CBC: 730 txf = &enc_xform_des; 731 goto enccommon; 732 case CRYPTO_3DES_CBC: 733 txf = &enc_xform_3des; 734 goto enccommon; 735 case CRYPTO_BLF_CBC: 736 txf = &enc_xform_blf; 737 goto enccommon; 738 case CRYPTO_CAST_CBC: 739 txf = &enc_xform_cast5; 740 goto enccommon; 741 case CRYPTO_SKIPJACK_CBC: 742 txf = &enc_xform_skipjack; 743 goto enccommon; 744 case CRYPTO_RIJNDAEL128_CBC: 745 txf = &enc_xform_rijndael128; 746 goto enccommon; 747 case CRYPTO_AES_XTS: 748 txf = &enc_xform_aes_xts; 749 goto enccommon; 750 case CRYPTO_CAMELLIA_CBC: 751 txf = &enc_xform_camellia; 752 goto enccommon; 753 case CRYPTO_NULL_CBC: 754 txf = &enc_xform_null; 755 goto enccommon; 756 enccommon: 757 if (cri->cri_key != NULL) { 758 error = txf->setkey(&((*swd)->sw_kschedule), 759 cri->cri_key, cri->cri_klen / 8); 760 if (error) { 761 swcr_freesession_locked(dev, i); 762 rw_runlock(&swcr_sessions_lock); 763 return error; 764 } 765 } 766 (*swd)->sw_exf = txf; 767 break; 768 769 case CRYPTO_MD5_HMAC: 770 axf = &auth_hash_hmac_md5; 771 goto authcommon; 772 case CRYPTO_SHA1_HMAC: 773 axf = &auth_hash_hmac_sha1; 774 goto authcommon; 775 case CRYPTO_SHA2_256_HMAC: 776 axf = &auth_hash_hmac_sha2_256; 777 goto authcommon; 778 case CRYPTO_SHA2_384_HMAC: 779 axf = &auth_hash_hmac_sha2_384; 780 goto authcommon; 781 case CRYPTO_SHA2_512_HMAC: 782 axf = &auth_hash_hmac_sha2_512; 783 goto authcommon; 784 case CRYPTO_NULL_HMAC: 785 axf = &auth_hash_null; 786 goto authcommon; 787 case CRYPTO_RIPEMD160_HMAC: 788 axf = &auth_hash_hmac_ripemd_160; 789 authcommon: 790 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, 791 M_NOWAIT); 792 if ((*swd)->sw_ictx == NULL) { 793 swcr_freesession_locked(dev, i); 794 rw_runlock(&swcr_sessions_lock); 795 return ENOBUFS; 796 } 797 798 (*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA, 799 M_NOWAIT); 800 if ((*swd)->sw_octx == NULL) { 801 swcr_freesession_locked(dev, i); 802 rw_runlock(&swcr_sessions_lock); 803 return ENOBUFS; 804 } 805 806 if (cri->cri_key != NULL) { 807 swcr_authprepare(axf, *swd, cri->cri_key, 808 cri->cri_klen); 809 } 810 811 (*swd)->sw_mlen = cri->cri_mlen; 812 (*swd)->sw_axf = axf; 813 break; 814 815 case CRYPTO_MD5_KPDK: 816 axf = &auth_hash_key_md5; 817 goto auth2common; 818 819 case CRYPTO_SHA1_KPDK: 820 axf = &auth_hash_key_sha1; 821 auth2common: 822 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, 823 M_NOWAIT); 824 if ((*swd)->sw_ictx == NULL) { 825 swcr_freesession_locked(dev, i); 826 rw_runlock(&swcr_sessions_lock); 827 return ENOBUFS; 828 } 829 830 (*swd)->sw_octx = malloc(cri->cri_klen / 8, 831 M_CRYPTO_DATA, M_NOWAIT); 832 if ((*swd)->sw_octx == NULL) { 833 swcr_freesession_locked(dev, i); 834 rw_runlock(&swcr_sessions_lock); 835 return ENOBUFS; 836 } 837 838 /* Store the key so we can "append" it to the payload */ 839 if (cri->cri_key != NULL) { 840 swcr_authprepare(axf, *swd, cri->cri_key, 841 cri->cri_klen); 842 } 843 844 (*swd)->sw_mlen = cri->cri_mlen; 845 (*swd)->sw_axf = axf; 846 break; 847 #ifdef notdef 848 case CRYPTO_MD5: 849 axf = &auth_hash_md5; 850 goto auth3common; 851 852 case CRYPTO_SHA1: 853 axf = &auth_hash_sha1; 854 auth3common: 855 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, 856 M_NOWAIT); 857 if ((*swd)->sw_ictx == NULL) { 858 swcr_freesession_locked(dev, i); 859 rw_runlock(&swcr_sessions_lock); 860 return ENOBUFS; 861 } 862 863 axf->Init((*swd)->sw_ictx); 864 (*swd)->sw_mlen = cri->cri_mlen; 865 (*swd)->sw_axf = axf; 866 break; 867 #endif 868 case CRYPTO_DEFLATE_COMP: 869 cxf = &comp_algo_deflate; 870 (*swd)->sw_cxf = cxf; 871 break; 872 default: 873 swcr_freesession_locked(dev, i); 874 rw_runlock(&swcr_sessions_lock); 875 return EINVAL; 876 } 877 878 (*swd)->sw_alg = cri->cri_alg; 879 cri = cri->cri_next; 880 swd = &((*swd)->sw_next); 881 } 882 rw_runlock(&swcr_sessions_lock); 883 return 0; 884 } 885 886 static int 887 swcr_freesession(device_t dev, u_int64_t tid) 888 { 889 int error; 890 891 rw_rlock(&swcr_sessions_lock); 892 error = swcr_freesession_locked(dev, tid); 893 rw_runlock(&swcr_sessions_lock); 894 return error; 895 } 896 897 /* 898 * Free a session. 899 */ 900 static int 901 swcr_freesession_locked(device_t dev, u_int64_t tid) 902 { 903 struct swcr_data *swd; 904 struct enc_xform *txf; 905 struct auth_hash *axf; 906 struct comp_algo *cxf; 907 u_int32_t sid = CRYPTO_SESID2LID(tid); 908 909 if (sid > swcr_sesnum || swcr_sessions == NULL || 910 swcr_sessions[sid] == NULL) 911 return EINVAL; 912 913 /* Silently accept and return */ 914 if (sid == 0) 915 return 0; 916 917 while ((swd = swcr_sessions[sid]) != NULL) { 918 swcr_sessions[sid] = swd->sw_next; 919 920 switch (swd->sw_alg) { 921 case CRYPTO_DES_CBC: 922 case CRYPTO_3DES_CBC: 923 case CRYPTO_BLF_CBC: 924 case CRYPTO_CAST_CBC: 925 case CRYPTO_SKIPJACK_CBC: 926 case CRYPTO_RIJNDAEL128_CBC: 927 case CRYPTO_AES_XTS: 928 case CRYPTO_CAMELLIA_CBC: 929 case CRYPTO_NULL_CBC: 930 txf = swd->sw_exf; 931 932 if (swd->sw_kschedule) 933 txf->zerokey(&(swd->sw_kschedule)); 934 break; 935 936 case CRYPTO_MD5_HMAC: 937 case CRYPTO_SHA1_HMAC: 938 case CRYPTO_SHA2_256_HMAC: 939 case CRYPTO_SHA2_384_HMAC: 940 case CRYPTO_SHA2_512_HMAC: 941 case CRYPTO_RIPEMD160_HMAC: 942 case CRYPTO_NULL_HMAC: 943 axf = swd->sw_axf; 944 945 if (swd->sw_ictx) { 946 bzero(swd->sw_ictx, axf->ctxsize); 947 free(swd->sw_ictx, M_CRYPTO_DATA); 948 } 949 if (swd->sw_octx) { 950 bzero(swd->sw_octx, axf->ctxsize); 951 free(swd->sw_octx, M_CRYPTO_DATA); 952 } 953 break; 954 955 case CRYPTO_MD5_KPDK: 956 case CRYPTO_SHA1_KPDK: 957 axf = swd->sw_axf; 958 959 if (swd->sw_ictx) { 960 bzero(swd->sw_ictx, axf->ctxsize); 961 free(swd->sw_ictx, M_CRYPTO_DATA); 962 } 963 if (swd->sw_octx) { 964 bzero(swd->sw_octx, swd->sw_klen); 965 free(swd->sw_octx, M_CRYPTO_DATA); 966 } 967 break; 968 969 case CRYPTO_MD5: 970 case CRYPTO_SHA1: 971 axf = swd->sw_axf; 972 973 if (swd->sw_ictx) 974 free(swd->sw_ictx, M_CRYPTO_DATA); 975 break; 976 977 case CRYPTO_DEFLATE_COMP: 978 cxf = swd->sw_cxf; 979 break; 980 } 981 982 free(swd, M_CRYPTO_DATA); 983 } 984 return 0; 985 } 986 987 /* 988 * Process a software request. 989 */ 990 static int 991 swcr_process(device_t dev, struct cryptop *crp, int hint) 992 { 993 struct cryptodesc *crd; 994 struct swcr_data *sw; 995 u_int32_t lid; 996 997 /* Sanity check */ 998 if (crp == NULL) 999 return EINVAL; 1000 1001 if (crp->crp_desc == NULL || crp->crp_buf == NULL) { 1002 crp->crp_etype = EINVAL; 1003 goto done; 1004 } 1005 1006 lid = CRYPTO_SESID2LID(crp->crp_sid); 1007 rw_rlock(&swcr_sessions_lock); 1008 if (swcr_sessions == NULL || lid >= swcr_sesnum || lid == 0 || 1009 swcr_sessions[lid] == NULL) { 1010 rw_runlock(&swcr_sessions_lock); 1011 crp->crp_etype = ENOENT; 1012 goto done; 1013 } 1014 rw_runlock(&swcr_sessions_lock); 1015 1016 /* Go through crypto descriptors, processing as we go */ 1017 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 1018 /* 1019 * Find the crypto context. 1020 * 1021 * XXX Note that the logic here prevents us from having 1022 * XXX the same algorithm multiple times in a session 1023 * XXX (or rather, we can but it won't give us the right 1024 * XXX results). To do that, we'd need some way of differentiating 1025 * XXX between the various instances of an algorithm (so we can 1026 * XXX locate the correct crypto context). 1027 */ 1028 rw_rlock(&swcr_sessions_lock); 1029 if (swcr_sessions == NULL) { 1030 rw_runlock(&swcr_sessions_lock); 1031 crp->crp_etype = ENOENT; 1032 goto done; 1033 } 1034 for (sw = swcr_sessions[lid]; 1035 sw && sw->sw_alg != crd->crd_alg; 1036 sw = sw->sw_next) 1037 ; 1038 rw_runlock(&swcr_sessions_lock); 1039 1040 /* No such context ? */ 1041 if (sw == NULL) { 1042 crp->crp_etype = EINVAL; 1043 goto done; 1044 } 1045 switch (sw->sw_alg) { 1046 case CRYPTO_DES_CBC: 1047 case CRYPTO_3DES_CBC: 1048 case CRYPTO_BLF_CBC: 1049 case CRYPTO_CAST_CBC: 1050 case CRYPTO_SKIPJACK_CBC: 1051 case CRYPTO_RIJNDAEL128_CBC: 1052 case CRYPTO_AES_XTS: 1053 case CRYPTO_CAMELLIA_CBC: 1054 if ((crp->crp_etype = swcr_encdec(crd, sw, 1055 crp->crp_buf, crp->crp_flags)) != 0) 1056 goto done; 1057 break; 1058 case CRYPTO_NULL_CBC: 1059 crp->crp_etype = 0; 1060 break; 1061 case CRYPTO_MD5_HMAC: 1062 case CRYPTO_SHA1_HMAC: 1063 case CRYPTO_SHA2_256_HMAC: 1064 case CRYPTO_SHA2_384_HMAC: 1065 case CRYPTO_SHA2_512_HMAC: 1066 case CRYPTO_RIPEMD160_HMAC: 1067 case CRYPTO_NULL_HMAC: 1068 case CRYPTO_MD5_KPDK: 1069 case CRYPTO_SHA1_KPDK: 1070 case CRYPTO_MD5: 1071 case CRYPTO_SHA1: 1072 if ((crp->crp_etype = swcr_authcompute(crd, sw, 1073 crp->crp_buf, crp->crp_flags)) != 0) 1074 goto done; 1075 break; 1076 1077 case CRYPTO_DEFLATE_COMP: 1078 if ((crp->crp_etype = swcr_compdec(crd, sw, 1079 crp->crp_buf, crp->crp_flags)) != 0) 1080 goto done; 1081 else 1082 crp->crp_olen = (int)sw->sw_size; 1083 break; 1084 1085 default: 1086 /* Unknown/unsupported algorithm */ 1087 crp->crp_etype = EINVAL; 1088 goto done; 1089 } 1090 } 1091 1092 done: 1093 crypto_done(crp); 1094 return 0; 1095 } 1096 1097 static void 1098 swcr_identify(driver_t *drv, device_t parent) 1099 { 1100 /* NB: order 10 is so we get attached after h/w devices */ 1101 if (device_find_child(parent, "cryptosoft", -1) == NULL && 1102 BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0) 1103 panic("cryptosoft: could not attach"); 1104 } 1105 1106 static int 1107 swcr_probe(device_t dev) 1108 { 1109 device_set_desc(dev, "software crypto"); 1110 return (BUS_PROBE_NOWILDCARD); 1111 } 1112 1113 static int 1114 swcr_attach(device_t dev) 1115 { 1116 rw_init(&swcr_sessions_lock, "swcr_sessions_lock"); 1117 memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN); 1118 memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN); 1119 1120 swcr_id = crypto_get_driverid(dev, 1121 CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC); 1122 if (swcr_id < 0) { 1123 device_printf(dev, "cannot initialize!"); 1124 return ENOMEM; 1125 } 1126 #define REGISTER(alg) \ 1127 crypto_register(swcr_id, alg, 0,0) 1128 REGISTER(CRYPTO_DES_CBC); 1129 REGISTER(CRYPTO_3DES_CBC); 1130 REGISTER(CRYPTO_BLF_CBC); 1131 REGISTER(CRYPTO_CAST_CBC); 1132 REGISTER(CRYPTO_SKIPJACK_CBC); 1133 REGISTER(CRYPTO_NULL_CBC); 1134 REGISTER(CRYPTO_MD5_HMAC); 1135 REGISTER(CRYPTO_SHA1_HMAC); 1136 REGISTER(CRYPTO_SHA2_256_HMAC); 1137 REGISTER(CRYPTO_SHA2_384_HMAC); 1138 REGISTER(CRYPTO_SHA2_512_HMAC); 1139 REGISTER(CRYPTO_RIPEMD160_HMAC); 1140 REGISTER(CRYPTO_NULL_HMAC); 1141 REGISTER(CRYPTO_MD5_KPDK); 1142 REGISTER(CRYPTO_SHA1_KPDK); 1143 REGISTER(CRYPTO_MD5); 1144 REGISTER(CRYPTO_SHA1); 1145 REGISTER(CRYPTO_RIJNDAEL128_CBC); 1146 REGISTER(CRYPTO_AES_XTS); 1147 REGISTER(CRYPTO_CAMELLIA_CBC); 1148 REGISTER(CRYPTO_DEFLATE_COMP); 1149 #undef REGISTER 1150 1151 return 0; 1152 } 1153 1154 static int 1155 swcr_detach(device_t dev) 1156 { 1157 crypto_unregister_all(swcr_id); 1158 rw_wlock(&swcr_sessions_lock); 1159 free(swcr_sessions, M_CRYPTO_DATA); 1160 swcr_sessions = NULL; 1161 rw_wunlock(&swcr_sessions_lock); 1162 rw_destroy(&swcr_sessions_lock); 1163 return 0; 1164 } 1165 1166 static device_method_t swcr_methods[] = { 1167 DEVMETHOD(device_identify, swcr_identify), 1168 DEVMETHOD(device_probe, swcr_probe), 1169 DEVMETHOD(device_attach, swcr_attach), 1170 DEVMETHOD(device_detach, swcr_detach), 1171 1172 DEVMETHOD(cryptodev_newsession, swcr_newsession), 1173 DEVMETHOD(cryptodev_freesession,swcr_freesession), 1174 DEVMETHOD(cryptodev_process, swcr_process), 1175 1176 {0, 0}, 1177 }; 1178 1179 static driver_t swcr_driver = { 1180 "cryptosoft", 1181 swcr_methods, 1182 0, /* NB: no softc */ 1183 }; 1184 static devclass_t swcr_devclass; 1185 1186 /* 1187 * NB: We explicitly reference the crypto module so we 1188 * get the necessary ordering when built as a loadable 1189 * module. This is required because we bundle the crypto 1190 * module code together with the cryptosoft driver (otherwise 1191 * normal module dependencies would handle things). 1192 */ 1193 extern int crypto_modevent(struct module *, int, void *); 1194 /* XXX where to attach */ 1195 DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0); 1196 MODULE_VERSION(cryptosoft, 1); 1197 MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1); 1198