/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/cast128/cast128.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/skipjack.h>
#include <sys/md5.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

u_int8_t hmac_ipad_buffer[64] = {
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
};

u_int8_t hmac_opad_buffer[64] = {
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
};

struct swcr_data **swcr_sessions = NULL;
u_int32_t swcr_sesnum = 0;
int32_t swcr_id = -1;

#define	COPYBACK(x, a, b, c, d) \
	(x) == CRYPTO_BUF_MBUF ? m_copyback((struct mbuf *)a,b,c,d) \
	: cuio_copyback((struct uio *)a,b,c,d)
#define	COPYDATA(x, a, b, c, d) \
	(x) == CRYPTO_BUF_MBUF ? m_copydata((struct mbuf *)a,b,c,d) \
	: cuio_copydata((struct uio *)a,b,c,d)

static	int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
	    struct swcr_data *sw, caddr_t buf, int outtype);
static	int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_process(void *, struct cryptop *, int);
static	int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
static	int swcr_freesession(void *, u_int64_t);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int outtype)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, k, j, blks;

	exf = sw->sw_exf;
	blks = exf->blocksize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else {
			/* Get random IV */
			for (i = 0;
			    i + sizeof (u_int32_t) < EALG_MAX_BLOCK_LEN;
			    i += sizeof (u_int32_t)) {
				u_int32_t temp = arc4random();

				bcopy(&temp, iv + i, sizeof(u_int32_t));
			}
			/*
			 * What if the block size is not a multiple
			 * of sizeof (u_int32_t), which is the size of
			 * what arc4random() returns ?
			 */
			if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) {
				u_int32_t temp = arc4random();

				bcopy (&temp, iv + i,
				    EALG_MAX_BLOCK_LEN - i);
			}
		}

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
			COPYBACK(outtype, buf, crd->crd_inject, blks, iv);
		}

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crd->crd_inject, blks, iv);
		}
	}

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		int error;

		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));
		error = exf->setkey(&sw->sw_kschedule,
		    crd->crd_key, crd->crd_klen / 8);
		if (error)
			return (error);
	}
	ivp = iv;

	if (outtype == CRYPTO_BUF_CONTIG) {
		if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(sw->sw_kschedule, buf + i);
			}
		} else {	/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the encrypted
			 * block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(sw->sw_kschedule, buf + i);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}

		return 0;
	} else if (outtype == CRYPTO_BUF_MBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0; /* Done with mbuf encryption/decryption */
	} else if (outtype == CRYPTO_BUF_IOV) {
		struct uio *uio = (struct uio *) buf;
		struct iovec *iov;

		/* Find beginning of data */
		iov = cuio_getptr(uio, crd->crd_skip, &k);
		if (iov == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an iovec, we have to do some copying.
			 */
			if (iov->iov_len < k + blks && iov->iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				/* Advance pointer */
				iov = cuio_getptr(uio, k + blks, &k);
				if (iov == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = (char *)iov->iov_base + k;

			while (iov->iov_len >= k + blks && i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0; /* Done with iovec encryption/decryption */
	}

	/* Unreachable */
	return EINVAL;
}

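/*
 * Note on the CBC handling above: each block follows the standard CBC
 * recurrence,
 *
 *	encrypt:  C[i] = E_k(P[i] ^ C[i-1]),	C[-1] = IV
 *	decrypt:  P[i] = D_k(C[i]) ^ C[i-1]
 *
 * which is why encryption XORs each plaintext block with the previous
 * ciphertext block (or the IV) before calling exf->encrypt(), and why
 * decryption in the contiguous case walks backwards so the previous
 * ciphertext block is still intact.  The mbuf and iovec paths decrypt
 * in place going forward instead, so they first stash a copy of the
 * ciphertext block in piv/iv before overwriting it.
 */
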
/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
    struct swcr_data *sw, caddr_t buf, int outtype)
{
	unsigned char aalg[AALG_MAX_RESULT_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == 0)
		return EINVAL;

	axf = sw->sw_axf;

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	switch (outtype) {
	case CRYPTO_BUF_CONTIG:
		axf->Update(&ctx, buf + crd->crd_skip, crd->crd_len);
		break;
	case CRYPTO_BUF_MBUF:
		err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
		    (int (*)(void *, void *, unsigned int)) axf->Update,
		    (caddr_t) &ctx);
		if (err)
			return err;
		break;
	case CRYPTO_BUF_IOV:
	default:
		return EINVAL;
	}

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	if (outtype == CRYPTO_BUF_CONTIG)
		bcopy(aalg, buf + crd->crd_inject, axf->authsize);
	else
		m_copyback((struct mbuf *) buf, crd->crd_inject,
		    axf->authsize, aalg);
	return 0;
}

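/*
 * The HMAC cases above implement HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)):
 * sw_ictx holds the hash state already primed with K ^ ipad and sw_octx the
 * state primed with K ^ opad (both precomputed in swcr_newsession()), so the
 * per-request cost is one pass over the data plus a short outer hash.  The
 * KPDK ("key, pad, data, key") cases instead keep the raw key in sw_octx and
 * append it to the hashed data before finalizing, as used by the older keyed
 * MD5/SHA1 AH transforms.
 */
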
/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
    caddr_t buf, int outtype)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * The (de)compression routines need the whole buffer at once,
	 * so if the data is not already in one contiguous piece we copy
	 * it into a temporary buffer first.
	 */
	MALLOC(data, u_int8_t *, crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	FREE(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/*
	 * Copy back the (de)compressed data; m_copyback extends the
	 * mbuf as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result > crd->crd_len) {
			/* Compression was useless, we lost time */
			FREE(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	COPYBACK(outtype, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		adj = result - crd->crd_len;
		if (outtype == CRYPTO_BUF_MBUF) {
			adj = result - crd->crd_len;
			m_adj((struct mbuf *)buf, adj);
		} else {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	FREE(out, M_CRYPTO_DATA);
	return 0;
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int k, error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1;	/* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	while (cri) {
		MALLOC(*swd, struct swcr_data *, sizeof(struct swcr_data),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (*swd == NULL) {
			swcr_freesession(NULL, i);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			error = txf->setkey(&((*swd)->sw_kschedule),
			    cri->cri_key, cri->cri_klen / 8);
			if (error) {
				swcr_freesession(NULL, i);
				return error;
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_SHA2_HMAC:
			if (cri->cri_klen == 256)
				axf = &auth_hash_hmac_sha2_256;
			else if (cri->cri_klen == 384)
				axf = &auth_hash_hmac_sha2_384;
			else if (cri->cri_klen == 512)
				axf = &auth_hash_hmac_sha2_512;
			else {
				swcr_freesession(NULL, i);
				return EINVAL;
			}
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160_96;
		authcommon:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_IPAD_VAL;

			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

			axf->Init((*swd)->sw_octx);
			axf->Update((*swd)->sw_octx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_OPAD_VAL;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			(*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_klen = cri->cri_klen / 8;
			bcopy(cri->cri_key, (*swd)->sw_octx, cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Final(NULL, (*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;
#ifdef notdef
		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;
#endif
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}

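/*
 * Each session id indexes a singly linked list of swcr_data structures,
 * one per cryptoini descriptor in the chain (e.g. one for the cipher and
 * one for the MAC of a combined encrypt-and-authenticate request).  Note
 * that the HMAC setup above XORs the pad values directly into cri_key and
 * then XORs them back out, so the caller's key buffer is unchanged on
 * return.
 */
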
/*
 * Free a session.
 */
static int
swcr_freesession(void *arg, u_int64_t tid)
{
	struct swcr_data *swd;
	struct enc_xform *txf;
	struct auth_hash *axf;
	struct comp_algo *cxf;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	if (sid > swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL)
		return EINVAL;

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx)
				free(swd->sw_ictx, M_CRYPTO_DATA);
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = swd->sw_cxf;
			break;
		}

		FREE(swd, M_CRYPTO_DATA);
	}
	return 0;
}

/*
 * Process a software request.
 */
static int
swcr_process(void *arg, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;
	int type;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		type = CRYPTO_BUF_MBUF;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		type = CRYPTO_BUF_IOV;
	} else {
		type = CRYPTO_BUF_CONTIG;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}

/*
 * Initialize the driver, called from the kernel main().
 */
static void
swcr_init(void)
{
	swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0)
		panic("Software crypto device cannot initialize!");
	crypto_register(swcr_id, CRYPTO_DES_CBC,
	    0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
#define	REGISTER(alg) \
	crypto_register(swcr_id, alg, 0,0,NULL,NULL,NULL,NULL)
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA2_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_DEFLATE_COMP);
#undef REGISTER
}
SYSINIT(cryptosoft_init, SI_SUB_PSEUDO, SI_ORDER_ANY, swcr_init, NULL)