/* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>
#include <sys/md5.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

u_int8_t *hmac_ipad_buffer;
u_int8_t *hmac_opad_buffer;

struct swcr_data **swcr_sessions = NULL;
u_int32_t swcr_sesnum = 0;
int32_t swcr_id = -1;

static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_process(void *, struct cryptop *, int);
static int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
static int swcr_freesession(void *, u_int64_t);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
        unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
        unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
        struct enc_xform *exf;
        int i, k, j, blks;

        exf = sw->sw_exf;
        blks = exf->blocksize;

        /* Check for non-padded data */
        if (crd->crd_len % blks)
                return EINVAL;

        /* Initialize the IV */
        if (crd->crd_flags & CRD_F_ENCRYPT) {
                /* IV explicitly provided ? */
                if (crd->crd_flags & CRD_F_IV_EXPLICIT)
                        bcopy(crd->crd_iv, iv, blks);
                else
                        arc4rand(iv, blks, 0);

                /* Do we need to write the IV? */
                if (!(crd->crd_flags & CRD_F_IV_PRESENT))
                        crypto_copyback(flags, buf, crd->crd_inject, blks, iv);

        } else {        /* Decryption */
                /* IV explicitly provided ? */
                if (crd->crd_flags & CRD_F_IV_EXPLICIT)
                        bcopy(crd->crd_iv, iv, blks);
                else {
                        /* Get IV off buf */
                        crypto_copydata(flags, buf, crd->crd_inject, blks, iv);
                }
        }

        if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
                int error;

                if (sw->sw_kschedule)
                        exf->zerokey(&(sw->sw_kschedule));
                error = exf->setkey(&sw->sw_kschedule,
                    crd->crd_key, crd->crd_klen / 8);
                if (error)
                        return (error);
        }
        ivp = iv;

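        /*
         * CBC mode is implemented by hand around the block cipher in exf:
         * on encryption each plaintext block is XORed with the previous
         * ciphertext block (the IV for the first block) before
         * exf->encrypt(), and on decryption the saved ciphertext block is
         * XORed in after exf->decrypt().  The three branches below differ
         * only in how they walk the data: an mbuf chain, a uio/iovec list,
         * or a contiguous buffer.
         */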
        if (flags & CRYPTO_F_IMBUF) {
                struct mbuf *m = (struct mbuf *) buf;

                /* Find beginning of data */
                m = m_getptr(m, crd->crd_skip, &k);
                if (m == NULL)
                        return EINVAL;

                i = crd->crd_len;

                while (i > 0) {
                        /*
                         * If there's insufficient data at the end of
                         * an mbuf, we have to do some copying.
                         */
                        if (m->m_len < k + blks && m->m_len != k) {
                                m_copydata(m, k, blks, blk);

                                /* Actual encryption/decryption */
                                if (crd->crd_flags & CRD_F_ENCRYPT) {
                                        /* XOR with previous block */
                                        for (j = 0; j < blks; j++)
                                                blk[j] ^= ivp[j];

                                        exf->encrypt(sw->sw_kschedule, blk);

                                        /*
                                         * Keep encrypted block for XOR'ing
                                         * with next block
                                         */
                                        bcopy(blk, iv, blks);
                                        ivp = iv;
                                } else {        /* decrypt */
                                        /*
                                         * Keep encrypted block for XOR'ing
                                         * with next block
                                         */
                                        if (ivp == iv)
                                                bcopy(blk, piv, blks);
                                        else
                                                bcopy(blk, iv, blks);

                                        exf->decrypt(sw->sw_kschedule, blk);

                                        /* XOR with previous block */
                                        for (j = 0; j < blks; j++)
                                                blk[j] ^= ivp[j];

                                        if (ivp == iv)
                                                bcopy(piv, iv, blks);
                                        else
                                                ivp = iv;
                                }

                                /* Copy back decrypted block */
                                m_copyback(m, k, blks, blk);

                                /* Advance pointer */
                                m = m_getptr(m, k + blks, &k);
                                if (m == NULL)
                                        return EINVAL;

                                i -= blks;

                                /* Could be done... */
                                if (i == 0)
                                        break;
                        }

                        /* Skip possibly empty mbufs */
                        if (k == m->m_len) {
                                for (m = m->m_next; m && m->m_len == 0;
                                    m = m->m_next)
                                        ;
                                k = 0;
                        }

                        /* Sanity check */
                        if (m == NULL)
                                return EINVAL;

                        /*
                         * Warning: idat may point to garbage here, but
                         * we only use it in the while() loop, only if
                         * there are indeed enough data.
                         */
                        idat = mtod(m, unsigned char *) + k;

                        while (m->m_len >= k + blks && i > 0) {
                                if (crd->crd_flags & CRD_F_ENCRYPT) {
                                        /* XOR with previous block/IV */
                                        for (j = 0; j < blks; j++)
                                                idat[j] ^= ivp[j];

                                        exf->encrypt(sw->sw_kschedule, idat);
                                        ivp = idat;
                                } else {        /* decrypt */
                                        /*
                                         * Keep encrypted block to be used
                                         * in next block's processing.
                                         */
                                        if (ivp == iv)
                                                bcopy(idat, piv, blks);
                                        else
                                                bcopy(idat, iv, blks);

                                        exf->decrypt(sw->sw_kschedule, idat);

                                        /* XOR with previous block/IV */
                                        for (j = 0; j < blks; j++)
                                                idat[j] ^= ivp[j];

                                        if (ivp == iv)
                                                bcopy(piv, iv, blks);
                                        else
                                                ivp = iv;
                                }

                                idat += blks;
                                k += blks;
                                i -= blks;
                        }
                }

                return 0; /* Done with mbuf encryption/decryption */
        } else if (flags & CRYPTO_F_IOV) {
                struct uio *uio = (struct uio *) buf;
                struct iovec *iov;

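                /*
                 * The uio/iovec walk below mirrors the mbuf walk above:
                 * blocks that straddle an iovec boundary are bounced
                 * through blk[], while runs of whole blocks are
                 * transformed in place through idat.
                 */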
                /* Find beginning of data */
                iov = cuio_getptr(uio, crd->crd_skip, &k);
                if (iov == NULL)
                        return EINVAL;

                i = crd->crd_len;

                while (i > 0) {
                        /*
                         * If there's insufficient data at the end of
                         * an iovec, we have to do some copying.
                         */
                        if (iov->iov_len < k + blks && iov->iov_len != k) {
                                cuio_copydata(uio, k, blks, blk);

                                /* Actual encryption/decryption */
                                if (crd->crd_flags & CRD_F_ENCRYPT) {
                                        /* XOR with previous block */
                                        for (j = 0; j < blks; j++)
                                                blk[j] ^= ivp[j];

                                        exf->encrypt(sw->sw_kschedule, blk);

                                        /*
                                         * Keep encrypted block for XOR'ing
                                         * with next block
                                         */
                                        bcopy(blk, iv, blks);
                                        ivp = iv;
                                } else {        /* decrypt */
                                        /*
                                         * Keep encrypted block for XOR'ing
                                         * with next block
                                         */
                                        if (ivp == iv)
                                                bcopy(blk, piv, blks);
                                        else
                                                bcopy(blk, iv, blks);

                                        exf->decrypt(sw->sw_kschedule, blk);

                                        /* XOR with previous block */
                                        for (j = 0; j < blks; j++)
                                                blk[j] ^= ivp[j];

                                        if (ivp == iv)
                                                bcopy(piv, iv, blks);
                                        else
                                                ivp = iv;
                                }

                                /* Copy back decrypted block */
                                cuio_copyback(uio, k, blks, blk);

                                /* Advance pointer */
                                iov = cuio_getptr(uio, k + blks, &k);
                                if (iov == NULL)
                                        return EINVAL;

                                i -= blks;

                                /* Could be done... */
                                if (i == 0)
                                        break;
                        }

                        /*
                         * Warning: idat may point to garbage here, but
                         * we only use it in the while() loop, only if
                         * there are indeed enough data.
                         */
                        idat = (char *)iov->iov_base + k;

                        while (iov->iov_len >= k + blks && i > 0) {
                                if (crd->crd_flags & CRD_F_ENCRYPT) {
                                        /* XOR with previous block/IV */
                                        for (j = 0; j < blks; j++)
                                                idat[j] ^= ivp[j];

                                        exf->encrypt(sw->sw_kschedule, idat);
                                        ivp = idat;
                                } else {        /* decrypt */
                                        /*
                                         * Keep encrypted block to be used
                                         * in next block's processing.
                                         */
                                        if (ivp == iv)
                                                bcopy(idat, piv, blks);
                                        else
                                                bcopy(idat, iv, blks);

                                        exf->decrypt(sw->sw_kschedule, idat);

                                        /* XOR with previous block/IV */
                                        for (j = 0; j < blks; j++)
                                                idat[j] ^= ivp[j];

                                        if (ivp == iv)
                                                bcopy(piv, iv, blks);
                                        else
                                                ivp = iv;
                                }

                                idat += blks;
                                k += blks;
                                i -= blks;
                        }
                }

                return 0; /* Done with iovec encryption/decryption */
        } else {        /* contiguous buffer */
                if (crd->crd_flags & CRD_F_ENCRYPT) {
                        for (i = crd->crd_skip;
                            i < crd->crd_skip + crd->crd_len; i += blks) {
                                /* XOR with the IV/previous block, as appropriate. */
                                if (i == crd->crd_skip)
                                        for (k = 0; k < blks; k++)
                                                buf[i + k] ^= ivp[k];
                                else
                                        for (k = 0; k < blks; k++)
                                                buf[i + k] ^= buf[i + k - blks];
                                exf->encrypt(sw->sw_kschedule, buf + i);
                        }
                } else {        /* Decrypt */
                        /*
                         * Start at the end, so we don't need to keep the
                         * encrypted block as the IV for the next block.
                         */
                        for (i = crd->crd_skip + crd->crd_len - blks;
                            i >= crd->crd_skip; i -= blks) {
                                exf->decrypt(sw->sw_kschedule, buf + i);

                                /* XOR with the IV/previous block, as appropriate */
                                if (i == crd->crd_skip)
                                        for (k = 0; k < blks; k++)
                                                buf[i + k] ^= ivp[k];
                                else
                                        for (k = 0; k < blks; k++)
                                                buf[i + k] ^= buf[i + k - blks];
                        }
                }

                return 0; /* Done with contiguous buffer encryption/decryption */
        }

        /* Unreachable */
        return EINVAL;
}

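/*
 * Prepare the authentication state from raw key material: for the HMAC
 * algorithms the inner and outer contexts are pre-seeded with the
 * ipad/opad-XORed key, while for the KPDK variants the key is stashed in
 * sw_octx so it can be appended to the data later.  Called from
 * swcr_newsession() and again whenever a request carries
 * CRD_F_KEY_EXPLICIT.
 */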
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
    int klen)
{
        int k;

        klen /= 8;

        switch (axf->type) {
        case CRYPTO_MD5_HMAC:
        case CRYPTO_SHA1_HMAC:
        case CRYPTO_SHA2_256_HMAC:
        case CRYPTO_SHA2_384_HMAC:
        case CRYPTO_SHA2_512_HMAC:
        case CRYPTO_NULL_HMAC:
        case CRYPTO_RIPEMD160_HMAC:
                for (k = 0; k < klen; k++)
                        key[k] ^= HMAC_IPAD_VAL;

                axf->Init(sw->sw_ictx);
                axf->Update(sw->sw_ictx, key, klen);
                axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);

                for (k = 0; k < klen; k++)
                        key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

                axf->Init(sw->sw_octx);
                axf->Update(sw->sw_octx, key, klen);
                axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);

                for (k = 0; k < klen; k++)
                        key[k] ^= HMAC_OPAD_VAL;
                break;
        case CRYPTO_MD5_KPDK:
        case CRYPTO_SHA1_KPDK:
                sw->sw_klen = klen;
                bcopy(key, sw->sw_octx, klen);
                axf->Init(sw->sw_ictx);
                axf->Update(sw->sw_ictx, key, klen);
                axf->Final(NULL, sw->sw_ictx);
                break;
        default:
                printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
                    "doesn't use keys.\n", __func__, axf->type);
        }
}

/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
        unsigned char aalg[HASH_MAX_LEN];
        struct auth_hash *axf;
        union authctx ctx;
        int err;

        if (sw->sw_ictx == NULL)
                return EINVAL;

        axf = sw->sw_axf;

        if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
                swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

        bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

        err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
            (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
        if (err)
                return err;

        switch (sw->sw_alg) {
        case CRYPTO_MD5_HMAC:
        case CRYPTO_SHA1_HMAC:
        case CRYPTO_SHA2_256_HMAC:
        case CRYPTO_SHA2_384_HMAC:
        case CRYPTO_SHA2_512_HMAC:
        case CRYPTO_RIPEMD160_HMAC:
                if (sw->sw_octx == NULL)
                        return EINVAL;

                axf->Final(aalg, &ctx);
                bcopy(sw->sw_octx, &ctx, axf->ctxsize);
                axf->Update(&ctx, aalg, axf->hashsize);
                axf->Final(aalg, &ctx);
                break;

        case CRYPTO_MD5_KPDK:
        case CRYPTO_SHA1_KPDK:
                if (sw->sw_octx == NULL)
                        return EINVAL;

                axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
                axf->Final(aalg, &ctx);
                break;

        case CRYPTO_NULL_HMAC:
                axf->Final(aalg, &ctx);
                break;
        }

        /* Inject the authentication data */
        crypto_copyback(flags, buf, crd->crd_inject,
            sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
        return 0;
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
    caddr_t buf, int flags)
{
        u_int8_t *data, *out;
        struct comp_algo *cxf;
        int adj;
        u_int32_t result;

        cxf = sw->sw_cxf;

        /*
         * We must handle the whole buffer of data in one pass, so if the
         * data is not contiguous (e.g. spread over an mbuf chain), copy it
         * into a temporary buffer first.
         */
        MALLOC(data, u_int8_t *, crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
        if (data == NULL)
                return (EINVAL);
        crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

        if (crd->crd_flags & CRD_F_COMP)
                result = cxf->compress(data, crd->crd_len, &out);
        else
                result = cxf->decompress(data, crd->crd_len, &out);

        FREE(data, M_CRYPTO_DATA);
        if (result == 0)
                return EINVAL;

        /*
         * Copy back the (de)compressed data; m_copyback() will extend the
         * mbuf as necessary.
         */
        sw->sw_size = result;
        /* Check the compressed size when doing compression */
        if (crd->crd_flags & CRD_F_COMP) {
                if (result > crd->crd_len) {
                        /* Compression was useless, we lost time */
                        FREE(out, M_CRYPTO_DATA);
                        return 0;
                }
        }

        crypto_copyback(flags, buf, crd->crd_skip, result, out);
        if (result < crd->crd_len) {
                adj = result - crd->crd_len;
                if (flags & CRYPTO_F_IMBUF) {
                        adj = result - crd->crd_len;
                        m_adj((struct mbuf *)buf, adj);
                } else if (flags & CRYPTO_F_IOV) {
                        struct uio *uio = (struct uio *)buf;
                        int ind;

                        adj = crd->crd_len - result;
                        ind = uio->uio_iovcnt - 1;

                        while (adj > 0 && ind >= 0) {
                                if (adj < uio->uio_iov[ind].iov_len) {
                                        uio->uio_iov[ind].iov_len -= adj;
                                        break;
                                }

                                adj -= uio->uio_iov[ind].iov_len;
                                uio->uio_iov[ind].iov_len = 0;
                                ind--;
                                uio->uio_iovcnt--;
                        }
                }
        }
        FREE(out, M_CRYPTO_DATA);
        return 0;
}

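/*
 * Software sessions live in the swcr_sessions array, indexed by the low
 * 32 bits of the opencrypto session id; slot 0 is never handed out and
 * the array doubles in size whenever it fills up.
 */
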
/*
 * Generate a new software session.
 */
static int
swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
{
        struct swcr_data **swd;
        struct auth_hash *axf;
        struct enc_xform *txf;
        struct comp_algo *cxf;
        u_int32_t i;
        int error;

        if (sid == NULL || cri == NULL)
                return EINVAL;

        if (swcr_sessions) {
                for (i = 1; i < swcr_sesnum; i++)
                        if (swcr_sessions[i] == NULL)
                                break;
        } else
                i = 1;          /* NB: to silence compiler warning */

        if (swcr_sessions == NULL || i == swcr_sesnum) {
                if (swcr_sessions == NULL) {
                        i = 1; /* We leave swcr_sessions[0] empty */
                        swcr_sesnum = CRYPTO_SW_SESSIONS;
                } else
                        swcr_sesnum *= 2;

                swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
                    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
                if (swd == NULL) {
                        /* Reset session number */
                        if (swcr_sesnum == CRYPTO_SW_SESSIONS)
                                swcr_sesnum = 0;
                        else
                                swcr_sesnum /= 2;
                        return ENOBUFS;
                }

                /* Copy existing sessions */
                if (swcr_sessions) {
                        bcopy(swcr_sessions, swd,
                            (swcr_sesnum / 2) * sizeof(struct swcr_data *));
                        free(swcr_sessions, M_CRYPTO_DATA);
                }

                swcr_sessions = swd;
        }

        swd = &swcr_sessions[i];
        *sid = i;

        while (cri) {
                MALLOC(*swd, struct swcr_data *, sizeof(struct swcr_data),
                    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
                if (*swd == NULL) {
                        swcr_freesession(NULL, i);
                        return ENOBUFS;
                }

                switch (cri->cri_alg) {
                case CRYPTO_DES_CBC:
                        txf = &enc_xform_des;
                        goto enccommon;
                case CRYPTO_3DES_CBC:
                        txf = &enc_xform_3des;
                        goto enccommon;
                case CRYPTO_BLF_CBC:
                        txf = &enc_xform_blf;
                        goto enccommon;
                case CRYPTO_CAST_CBC:
                        txf = &enc_xform_cast5;
                        goto enccommon;
                case CRYPTO_SKIPJACK_CBC:
                        txf = &enc_xform_skipjack;
                        goto enccommon;
                case CRYPTO_RIJNDAEL128_CBC:
                        txf = &enc_xform_rijndael128;
                        goto enccommon;
                case CRYPTO_NULL_CBC:
                        txf = &enc_xform_null;
                        goto enccommon;
                enccommon:
                        if (cri->cri_key != NULL) {
                                error = txf->setkey(&((*swd)->sw_kschedule),
                                    cri->cri_key, cri->cri_klen / 8);
                                if (error) {
                                        swcr_freesession(NULL, i);
                                        return error;
                                }
                        }
                        (*swd)->sw_exf = txf;
                        break;

                case CRYPTO_MD5_HMAC:
                        axf = &auth_hash_hmac_md5;
                        goto authcommon;
                case CRYPTO_SHA1_HMAC:
                        axf = &auth_hash_hmac_sha1;
                        goto authcommon;
                case CRYPTO_SHA2_256_HMAC:
                        axf = &auth_hash_hmac_sha2_256;
                        goto authcommon;
                case CRYPTO_SHA2_384_HMAC:
                        axf = &auth_hash_hmac_sha2_384;
                        goto authcommon;
                case CRYPTO_SHA2_512_HMAC:
                        axf = &auth_hash_hmac_sha2_512;
                        goto authcommon;
                case CRYPTO_NULL_HMAC:
                        axf = &auth_hash_null;
                        goto authcommon;
                case CRYPTO_RIPEMD160_HMAC:
                        axf = &auth_hash_hmac_ripemd_160;
                authcommon:
                        (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
                            M_NOWAIT);
                        if ((*swd)->sw_ictx == NULL) {
                                swcr_freesession(NULL, i);
                                return ENOBUFS;
                        }

                        (*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
                            M_NOWAIT);
                        if ((*swd)->sw_octx == NULL) {
                                swcr_freesession(NULL, i);
                                return ENOBUFS;
                        }

                        if (cri->cri_key != NULL) {
                                swcr_authprepare(axf, *swd, cri->cri_key,
                                    cri->cri_klen);
                        }

                        (*swd)->sw_mlen = cri->cri_mlen;
                        (*swd)->sw_axf = axf;
                        break;

                case CRYPTO_MD5_KPDK:
                        axf = &auth_hash_key_md5;
                        goto auth2common;

                case CRYPTO_SHA1_KPDK:
                        axf = &auth_hash_key_sha1;
                auth2common:
                        (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
                            M_NOWAIT);
                        if ((*swd)->sw_ictx == NULL) {
                                swcr_freesession(NULL, i);
                                return ENOBUFS;
                        }

                        (*swd)->sw_octx = malloc(cri->cri_klen / 8,
                            M_CRYPTO_DATA, M_NOWAIT);
                        if ((*swd)->sw_octx == NULL) {
                                swcr_freesession(NULL, i);
                                return ENOBUFS;
                        }

                        /* Store the key so we can "append" it to the payload */
                        if (cri->cri_key != NULL) {
                                swcr_authprepare(axf, *swd, cri->cri_key,
                                    cri->cri_klen);
                        }

                        (*swd)->sw_mlen = cri->cri_mlen;
                        (*swd)->sw_axf = axf;
                        break;
#ifdef notdef
                case CRYPTO_MD5:
                        axf = &auth_hash_md5;
                        goto auth3common;

                case CRYPTO_SHA1:
                        axf = &auth_hash_sha1;
                auth3common:
                        (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
                            M_NOWAIT);
                        if ((*swd)->sw_ictx == NULL) {
                                swcr_freesession(NULL, i);
                                return ENOBUFS;
                        }

                        axf->Init((*swd)->sw_ictx);
                        (*swd)->sw_mlen = cri->cri_mlen;
                        (*swd)->sw_axf = axf;
                        break;
#endif
                case CRYPTO_DEFLATE_COMP:
                        cxf = &comp_algo_deflate;
                        (*swd)->sw_cxf = cxf;
                        break;
                default:
                        swcr_freesession(NULL, i);
                        return EINVAL;
                }

                (*swd)->sw_alg = cri->cri_alg;
                cri = cri->cri_next;
                swd = &((*swd)->sw_next);
        }
        return 0;
}

/*
 * Free a session.
 */
static int
swcr_freesession(void *arg, u_int64_t tid)
{
        struct swcr_data *swd;
        struct enc_xform *txf;
        struct auth_hash *axf;
        struct comp_algo *cxf;
        u_int32_t sid = CRYPTO_SESID2LID(tid);

        if (sid >= swcr_sesnum || swcr_sessions == NULL ||
            swcr_sessions[sid] == NULL)
                return EINVAL;

        /* Silently accept and return */
        if (sid == 0)
                return 0;

        while ((swd = swcr_sessions[sid]) != NULL) {
                swcr_sessions[sid] = swd->sw_next;

                switch (swd->sw_alg) {
                case CRYPTO_DES_CBC:
                case CRYPTO_3DES_CBC:
                case CRYPTO_BLF_CBC:
                case CRYPTO_CAST_CBC:
                case CRYPTO_SKIPJACK_CBC:
                case CRYPTO_RIJNDAEL128_CBC:
                case CRYPTO_NULL_CBC:
                        txf = swd->sw_exf;

                        if (swd->sw_kschedule)
                                txf->zerokey(&(swd->sw_kschedule));
                        break;

                case CRYPTO_MD5_HMAC:
                case CRYPTO_SHA1_HMAC:
                case CRYPTO_SHA2_256_HMAC:
                case CRYPTO_SHA2_384_HMAC:
                case CRYPTO_SHA2_512_HMAC:
                case CRYPTO_RIPEMD160_HMAC:
                case CRYPTO_NULL_HMAC:
                        axf = swd->sw_axf;

                        if (swd->sw_ictx) {
                                bzero(swd->sw_ictx, axf->ctxsize);
                                free(swd->sw_ictx, M_CRYPTO_DATA);
                        }
                        if (swd->sw_octx) {
                                bzero(swd->sw_octx, axf->ctxsize);
                                free(swd->sw_octx, M_CRYPTO_DATA);
                        }
                        break;

                case CRYPTO_MD5_KPDK:
                case CRYPTO_SHA1_KPDK:
                        axf = swd->sw_axf;

                        if (swd->sw_ictx) {
                                bzero(swd->sw_ictx, axf->ctxsize);
                                free(swd->sw_ictx, M_CRYPTO_DATA);
                        }
                        if (swd->sw_octx) {
                                bzero(swd->sw_octx, swd->sw_klen);
                                free(swd->sw_octx, M_CRYPTO_DATA);
                        }
                        break;

                case CRYPTO_MD5:
                case CRYPTO_SHA1:
                        axf = swd->sw_axf;

                        if (swd->sw_ictx)
                                free(swd->sw_ictx, M_CRYPTO_DATA);
                        break;

                case CRYPTO_DEFLATE_COMP:
                        cxf = swd->sw_cxf;
                        break;
                }

                FREE(swd, M_CRYPTO_DATA);
        }
        return 0;
}

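/*
 * Requests are handled synchronously in the caller's context: the driver
 * registers with CRYPTOCAP_F_SYNC, and swcr_process() calls crypto_done()
 * on the request before returning.
 */
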
/*
 * Process a software request.
 */
static int
swcr_process(void *arg, struct cryptop *crp, int hint)
{
        struct cryptodesc *crd;
        struct swcr_data *sw;
        u_int32_t lid;

        /* Sanity check */
        if (crp == NULL)
                return EINVAL;

        if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
                crp->crp_etype = EINVAL;
                goto done;
        }

        lid = crp->crp_sid & 0xffffffff;
        if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
                crp->crp_etype = ENOENT;
                goto done;
        }

        /* Go through crypto descriptors, processing as we go */
        for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
                /*
                 * Find the crypto context.
                 *
                 * XXX Note that the logic here prevents us from having
                 * XXX the same algorithm multiple times in a session
                 * XXX (or rather, we can but it won't give us the right
                 * XXX results). To do that, we'd need some way of differentiating
                 * XXX between the various instances of an algorithm (so we can
                 * XXX locate the correct crypto context).
                 */
                for (sw = swcr_sessions[lid];
                    sw && sw->sw_alg != crd->crd_alg;
                    sw = sw->sw_next)
                        ;

                /* No such context ? */
                if (sw == NULL) {
                        crp->crp_etype = EINVAL;
                        goto done;
                }
                switch (sw->sw_alg) {
                case CRYPTO_DES_CBC:
                case CRYPTO_3DES_CBC:
                case CRYPTO_BLF_CBC:
                case CRYPTO_CAST_CBC:
                case CRYPTO_SKIPJACK_CBC:
                case CRYPTO_RIJNDAEL128_CBC:
                        if ((crp->crp_etype = swcr_encdec(crd, sw,
                            crp->crp_buf, crp->crp_flags)) != 0)
                                goto done;
                        break;
                case CRYPTO_NULL_CBC:
                        crp->crp_etype = 0;
                        break;
                case CRYPTO_MD5_HMAC:
                case CRYPTO_SHA1_HMAC:
                case CRYPTO_SHA2_256_HMAC:
                case CRYPTO_SHA2_384_HMAC:
                case CRYPTO_SHA2_512_HMAC:
                case CRYPTO_RIPEMD160_HMAC:
                case CRYPTO_NULL_HMAC:
                case CRYPTO_MD5_KPDK:
                case CRYPTO_SHA1_KPDK:
                case CRYPTO_MD5:
                case CRYPTO_SHA1:
                        if ((crp->crp_etype = swcr_authcompute(crd, sw,
                            crp->crp_buf, crp->crp_flags)) != 0)
                                goto done;
                        break;

                case CRYPTO_DEFLATE_COMP:
                        if ((crp->crp_etype = swcr_compdec(crd, sw,
                            crp->crp_buf, crp->crp_flags)) != 0)
                                goto done;
                        else
                                crp->crp_olen = (int)sw->sw_size;
                        break;

                default:
                        /* Unknown/unsupported algorithm */
                        crp->crp_etype = EINVAL;
                        goto done;
                }
        }

done:
        crypto_done(crp);
        return 0;
}

/*
 * Initialize the driver; invoked at boot via SYSINIT().
 */
static void
swcr_init(void)
{
        u_int i;

        hmac_ipad_buffer = malloc(HMAC_MAX_BLOCK_LEN, M_CRYPTO_DATA, M_WAITOK);
        for (i = 0; i < HMAC_MAX_BLOCK_LEN; i++)
                hmac_ipad_buffer[i] = HMAC_IPAD_VAL;
        hmac_opad_buffer = malloc(HMAC_MAX_BLOCK_LEN, M_CRYPTO_DATA, M_WAITOK);
        for (i = 0; i < HMAC_MAX_BLOCK_LEN; i++)
                hmac_opad_buffer[i] = HMAC_OPAD_VAL;

        swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
        if (swcr_id < 0)
                panic("Software crypto device cannot initialize!");
        crypto_register(swcr_id, CRYPTO_DES_CBC,
            0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
#define REGISTER(alg) \
        crypto_register(swcr_id, alg, 0, 0, NULL, NULL, NULL, NULL)
        REGISTER(CRYPTO_3DES_CBC);
        REGISTER(CRYPTO_BLF_CBC);
        REGISTER(CRYPTO_CAST_CBC);
        REGISTER(CRYPTO_SKIPJACK_CBC);
        REGISTER(CRYPTO_NULL_CBC);
        REGISTER(CRYPTO_MD5_HMAC);
        REGISTER(CRYPTO_SHA1_HMAC);
        REGISTER(CRYPTO_SHA2_256_HMAC);
        REGISTER(CRYPTO_SHA2_384_HMAC);
        REGISTER(CRYPTO_SHA2_512_HMAC);
        REGISTER(CRYPTO_RIPEMD160_HMAC);
        REGISTER(CRYPTO_NULL_HMAC);
        REGISTER(CRYPTO_MD5_KPDK);
        REGISTER(CRYPTO_SHA1_KPDK);
        REGISTER(CRYPTO_MD5);
        REGISTER(CRYPTO_SHA1);
        REGISTER(CRYPTO_RIJNDAEL128_CBC);
        REGISTER(CRYPTO_DEFLATE_COMP);
#undef REGISTER
}
SYSINIT(cryptosoft_init, SI_SUB_PSEUDO, SI_ORDER_ANY, swcr_init, NULL);

static void
swcr_uninit(void)
{

        if (swcr_sessions != NULL)
                FREE(swcr_sessions, M_CRYPTO_DATA);
        free(hmac_ipad_buffer, M_CRYPTO_DATA);
        free(hmac_opad_buffer, M_CRYPTO_DATA);
}
SYSUNINIT(cryptosoft_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY, swcr_uninit, NULL);
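
/*
 * For reference, a rough sketch of how a kernel consumer might drive this
 * driver through the opencrypto framework.  This is illustrative only and
 * not part of the driver; the exact crypto(9) entry points and the meaning
 * of crypto_newsession()'s third argument have varied between releases, so
 * treat the calls below as assumptions to check against sys/opencrypto:
 *
 *      struct cryptoini cri;
 *      struct cryptop *crp;
 *      u_int64_t sid;
 *
 *      bzero(&cri, sizeof(cri));
 *      cri.cri_alg = CRYPTO_SHA1_HMAC;
 *      cri.cri_key = key;                      (caller-supplied HMAC key)
 *      cri.cri_klen = 20 * 8;                  (key length in bits)
 *      if (crypto_newsession(&sid, &cri, 0))   (0: software crypto is fine)
 *              return;
 *
 *      crp = crypto_getreq(1);                 (one descriptor)
 *      crp->crp_sid = sid;
 *      crp->crp_ilen = len;
 *      crp->crp_flags = CRYPTO_F_IMBUF;
 *      crp->crp_buf = (caddr_t)m;              (mbuf chain to authenticate)
 *      crp->crp_desc->crd_alg = CRYPTO_SHA1_HMAC;
 *      crp->crp_desc->crd_skip = 0;
 *      crp->crp_desc->crd_len = len;
 *      crp->crp_desc->crd_inject = len;        (offset where the MAC lands)
 *      crp->crp_callback = my_callback;        (hypothetical completion hook)
 *      crypto_dispatch(crp);
 */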