/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>
#include <sys/md5.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

static int32_t swcr_id;
static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum;

u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];

static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_freesession(device_t dev, u_int64_t tid);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, k, j, blks;

	exf = sw->sw_exf;
	blks = exf->blocksize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else
			arc4rand(iv, blks, 0);

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, blks, iv);

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, blks, iv);
		}
	}

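	/*
	 * A request may override the session key (CRD_F_KEY_EXPLICIT);
	 * in that case the existing key schedule is zeroed and rebuilt
	 * below before any payload data is touched.
	 */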
	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		int error;

		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));
		error = exf->setkey(&sw->sw_kschedule,
		    crd->crd_key, crd->crd_klen / 8);
		if (error)
			return (error);
	}
	ivp = iv;

	if (flags & CRYPTO_F_IMBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0; /* Done with mbuf encryption/decryption */
	} else if (flags & CRYPTO_F_IOV) {
		struct uio *uio = (struct uio *) buf;
		struct iovec *iov;

		/* Find beginning of data */
		iov = cuio_getptr(uio, crd->crd_skip, &k);
		if (iov == NULL)
			return EINVAL;

		i = crd->crd_len;

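		/*
		 * As in the mbuf case above, each iteration either copies a
		 * block that straddles an iovec boundary into blk[] (slow
		 * path) or, when a whole block is contiguous, works on it
		 * in place through idat (fast path).
		 */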
		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an iovec, we have to do some copying.
			 */
			if (iov->iov_len < k + blks && iov->iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				/* Advance pointer */
				iov = cuio_getptr(uio, k + blks, &k);
				if (iov == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = (char *)iov->iov_base + k;

			while (iov->iov_len >= k + blks && i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
			if (k == iov->iov_len) {
				iov++;
				k = 0;
			}
		}

		return 0; /* Done with iovec encryption/decryption */
	} else {	/* contiguous buffer */
		if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(sw->sw_kschedule, buf + i);
			}
		} else {	/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the encrypted
			 * block as the IV for the next block.
			 */
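			/*
			 * Walking backwards means every ciphertext block that
			 * is still needed as a chaining value has not yet been
			 * overwritten with plaintext, so no scratch copy (piv)
			 * is required here.
			 */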
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(sw->sw_kschedule, buf + i);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}

		return 0; /* Done with contiguous buffer encryption/decryption */
	}

	/* Unreachable */
	return EINVAL;
}

static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
    int klen)
{
	int k;

	klen /= 8;

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(NULL, sw->sw_ictx);
		break;
	default:
		printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
		    "doesn't use keys.\n", __func__, axf->type);
	}
}

/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == NULL)
		return EINVAL;

	axf = sw->sw_axf;

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

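	/*
	 * For the HMAC cases above this yields the standard construction
	 * H((K ^ opad) || H((K ^ ipad) || data)); the precomputed inner and
	 * outer contexts come from swcr_authprepare().  The digest written
	 * back below is truncated to sw_mlen bytes when the session asked
	 * for a shorter authenticator.
	 */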
	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
    caddr_t buf, int flags)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * We must handle the whole buffer of data in one pass, so if the
	 * payload is not contiguous (e.g. spread over an mbuf chain) we
	 * copy it into a temporary buffer first.
	 */
	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/*
	 * Copy back the (de)compressed data; m_copyback() will extend the
	 * mbuf as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result > crd->crd_len) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	crypto_copyback(flags, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		adj = result - crd->crd_len;
		if (flags & CRYPTO_F_IMBUF) {
			adj = result - crd->crd_len;
			m_adj((struct mbuf *)buf, adj);
		} else if (flags & CRYPTO_F_IOV) {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}

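/*
 * Sessions live in the swcr_sessions array, indexed by the low 32 bits of
 * the session id; slot 0 is never handed out.  Each slot holds a linked
 * list of swcr_data, one entry per algorithm requested in the cryptoini
 * chain, and the array is doubled whenever it fills up.
 */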
/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions != NULL) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	while (cri) {
		*swd = malloc(sizeof(struct swcr_data),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (*swd == NULL) {
			swcr_freesession(dev, i);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_CAMELLIA_CBC:
			txf = &enc_xform_camellia;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			if (cri->cri_key != NULL) {
				error = txf->setkey(&((*swd)->sw_kschedule),
				    cri->cri_key, cri->cri_klen / 8);
				if (error) {
					swcr_freesession(dev, i);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

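		/*
		 * The KPDK (keyed MD5/SHA1) variants do not use the HMAC
		 * pads: the hash context is primed with the key once in
		 * swcr_authprepare(), and a copy of the key kept in sw_octx
		 * is appended again at finalization time in
		 * swcr_authcompute().
		 */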
		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(cri->cri_klen / 8,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#ifdef notdef
		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#endif
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(dev, i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}

/*
 * Free a session.
 */
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	struct swcr_data *swd;
	struct enc_xform *txf;
	struct auth_hash *axf;
	struct comp_algo *cxf;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	if (sid > swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL)
		return EINVAL;

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx)
				free(swd->sw_ictx, M_CRYPTO_DATA);
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = swd->sw_cxf;
			break;
		}

		free(swd, M_CRYPTO_DATA);
	}
	return 0;
}

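/*
 * Each request (struct cryptop) carries a chain of descriptors; the
 * dispatcher below looks up the matching swcr_data by algorithm in the
 * session's list and hands the buffer to the cipher, authentication or
 * compression routine accordingly.
 */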
/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_CAMELLIA_CBC:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", -1) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (0);
}

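/*
 * Attach fills the shared HMAC ipad/opad buffers, obtains a driver id
 * from the crypto framework, and registers every algorithm implemented
 * here so the framework can route session requests to this driver.
 */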
static int
swcr_attach(device_t dev)
{
	memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
	memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);

	swcr_id = crypto_get_driverid(dev,
	    CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return ENOMEM;
	}
#define	REGISTER(alg) \
	crypto_register(swcr_id, alg, 0, 0)
	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA2_256_HMAC);
	REGISTER(CRYPTO_SHA2_384_HMAC);
	REGISTER(CRYPTO_SHA2_512_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_CAMELLIA_CBC);
	REGISTER(CRYPTO_DEFLATE_COMP);
#undef REGISTER

	return 0;
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	if (swcr_sessions != NULL)
		free(swcr_sessions, M_CRYPTO_DATA);
	return 0;
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);