/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>
#include <sys/md5.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

static	int32_t swcr_id;
static	struct swcr_data **swcr_sessions = NULL;
static	u_int32_t swcr_sesnum;

u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];

static	int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t,
	    int);
static	int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_freesession(device_t dev, u_int64_t tid);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, k, j, blks;

	exf = sw->sw_exf;
	blks = exf->blocksize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else
			arc4rand(iv, blks, 0);

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, blks, iv);

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, blks, iv);
		}
	}

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		int error;

		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));
		error = exf->setkey(&sw->sw_kschedule,
		    crd->crd_key, crd->crd_klen / 8);
		if (error)
			return (error);
	}
	ivp = iv;

	if (flags & CRYPTO_F_IMBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back the result block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there is indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0;	/* Done with mbuf encryption/decryption */
	} else if (flags & CRYPTO_F_IOV) {
		struct uio *uio = (struct uio *) buf;
		struct iovec *iov;

		/* Find beginning of data */
		iov = cuio_getptr(uio, crd->crd_skip, &k);
		if (iov == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an iovec, we have to do some copying.
			 */
			if (iov->iov_len < k + blks && iov->iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back the result block */
				cuio_copyback(uio, k, blks, blk);

				/* Advance pointer */
				iov = cuio_getptr(uio, k + blks, &k);
				if (iov == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there is indeed enough data.
			 */
			idat = (char *)iov->iov_base + k;

			while (iov->iov_len >= k + blks && i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0;	/* Done with iovec encryption/decryption */
	} else {	/* contiguous buffer */
		if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(sw->sw_kschedule, buf + i);
			}
		} else {	/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the
			 * encrypted block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(sw->sw_kschedule, buf + i);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}

		return 0;	/* Done with contiguous buffer encryption/decryption */
	}

	/* Unreachable */
	return EINVAL;
}

/*
 * Prepare the authentication state for a keyed hash: precompute the
 * HMAC inner/outer pad contexts, or stash the key for the KPDK
 * transforms, so per-request processing only has to hash the payload.
 */
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
    int klen)
{
	int k;

	klen /= 8;

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(NULL, sw->sw_ictx);
		break;
	default:
		printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
		    "doesn't use keys.\n", __func__, axf->type);
	}
}

/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == NULL)
		return EINVAL;

	axf = sw->sw_axf;

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}

/*
 * Apply a compression/decompression algorithm.
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
    caddr_t buf, int flags)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * The (de)compression routines operate on the whole buffer at
	 * once, so if the data is not contiguous (e.g. spread across an
	 * mbuf chain) it must first be copied into a scratch buffer.
	 */
	MALLOC(data, u_int8_t *, crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	FREE(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/*
	 * Copy back the (de)compressed data. For mbufs, m_copyback
	 * extends the chain as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result > crd->crd_len) {
			/* Compression was useless; leave the data untouched */
			FREE(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	crypto_copyback(flags, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		adj = result - crd->crd_len;
		if (flags & CRYPTO_F_IMBUF) {
			m_adj((struct mbuf *)buf, adj);
		} else if (flags & CRYPTO_F_IOV) {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	FREE(out, M_CRYPTO_DATA);
	return 0;
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1;	/* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions != NULL) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	while (cri) {
		MALLOC(*swd, struct swcr_data *, sizeof(struct swcr_data),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (*swd == NULL) {
			swcr_freesession(dev, i);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			if (cri->cri_key != NULL) {
				error = txf->setkey(&((*swd)->sw_kschedule),
				    cri->cri_key, cri->cri_klen / 8);
				if (error) {
					swcr_freesession(dev, i);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(cri->cri_klen / 8,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#ifdef notdef
		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(dev, i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#endif
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(dev, i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}

/*
 * Free a session.
 */
static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	struct swcr_data *swd;
	struct enc_xform *txf;
	struct auth_hash *axf;
	struct comp_algo *cxf;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL)
		return EINVAL;

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx)
				free(swd->sw_ictx, M_CRYPTO_DATA);
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = swd->sw_cxf;
			break;
		}

		FREE(swd, M_CRYPTO_DATA);
	}
	return 0;
}

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of
		 * XXX differentiating between the various instances of an
		 * XXX algorithm (so we can locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", -1) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (0);
}

static int
swcr_attach(device_t dev)
{
	memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
	memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);

	swcr_id = crypto_get_driverid(dev,
	    CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return ENOMEM;
	}
#define	REGISTER(alg) \
	crypto_register(swcr_id, alg, 0, 0)
	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA2_256_HMAC);
	REGISTER(CRYPTO_SHA2_384_HMAC);
	REGISTER(CRYPTO_SHA2_512_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_DEFLATE_COMP);
#undef REGISTER

	return 0;
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	if (swcr_sessions != NULL)
		FREE(swcr_sessions, M_CRYPTO_DATA);
	return 0;
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);