/* $FreeBSD$ */
/* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */

/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/cast128/cast128.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/skipjack.h>
#include <sys/md5.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

u_int8_t hmac_ipad_buffer[64] = {
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
};

u_int8_t hmac_opad_buffer[64] = {
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
};


struct swcr_data **swcr_sessions = NULL;
u_int32_t swcr_sesnum = 0;
int32_t swcr_id = -1;

#define COPYBACK(x, a, b, c, d) \
	(x) == CRYPTO_BUF_MBUF ? m_copyback((struct mbuf *)a,b,c,d) \
	: cuio_copyback((struct uio *)a,b,c,d)
#define COPYDATA(x, a, b, c, d) \
	(x) == CRYPTO_BUF_MBUF ? m_copydata((struct mbuf *)a,b,c,d) \
	: cuio_copydata((struct uio *)a,b,c,d)

static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
    struct swcr_data *sw, caddr_t buf, int outtype);
static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_process(void *, struct cryptop *, int);
static int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
static int swcr_freesession(void *, u_int64_t);

/*
 * NB: These came over from openbsd and are kept private
 * to the crypto code for now.
 */
extern int m_apply(struct mbuf *m, int off, int len,
    int (*f)(caddr_t, caddr_t, unsigned int), caddr_t fstate);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int outtype)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, k, j, blks;

	exf = sw->sw_exf;
	blks = exf->blocksize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else {
			/* Get random IV */
			for (i = 0;
			    i + sizeof (u_int32_t) < EALG_MAX_BLOCK_LEN;
			    i += sizeof (u_int32_t)) {
				u_int32_t temp = arc4random();

				bcopy(&temp, iv + i, sizeof(u_int32_t));
			}
			/*
			 * What if the block size is not a multiple
			 * of sizeof (u_int32_t), which is the size of
			 * what arc4random() returns ?
			 */
			if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) {
				u_int32_t temp = arc4random();

				bcopy(&temp, iv + i,
				    EALG_MAX_BLOCK_LEN - i);
			}
		}

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
			COPYBACK(outtype, buf, crd->crd_inject, blks, iv);
		}

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crd->crd_inject, blks, iv);
		}
	}

	ivp = iv;

	if (outtype == CRYPTO_BUF_CONTIG) {
		if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(sw->sw_kschedule, buf + i);
			}
		} else {	/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the
			 * encrypted block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(sw->sw_kschedule, buf + i);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}

		return 0;
	} else if (outtype == CRYPTO_BUF_MBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0;	/* Done with mbuf encryption/decryption */
	} else if (outtype == CRYPTO_BUF_IOV) {
		struct uio *uio = (struct uio *) buf;
		struct iovec *iov;

		/* Find beginning of data */
		iov = cuio_getptr(uio, crd->crd_skip, &k);
		if (iov == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an iovec, we have to do some copying.
			 */
			if (iov->iov_len < k + blks && iov->iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				/* Advance pointer */
				iov = cuio_getptr(uio, k + blks, &k);
				if (iov == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = (char *)iov->iov_base + k;

			while (iov->iov_len >= k + blks && i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0;	/* Done with iovec encryption/decryption */
	}

	/* Unreachable */
	return EINVAL;
}

/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
    struct swcr_data *sw, caddr_t buf, int outtype)
{
	unsigned char aalg[AALG_MAX_RESULT_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == 0)
		return EINVAL;

	axf = sw->sw_axf;

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	switch (outtype) {
	case CRYPTO_BUF_CONTIG:
		axf->Update(&ctx, buf + crd->crd_skip, crd->crd_len);
		break;
	case CRYPTO_BUF_MBUF:
		err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
		    (int (*)(caddr_t, caddr_t, unsigned int)) axf->Update,
		    (caddr_t) &ctx);
		if (err)
			return err;
		break;
	case CRYPTO_BUF_IOV:
	default:
		return EINVAL;
	}

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	if (outtype == CRYPTO_BUF_CONTIG)
		bcopy(aalg, buf + crd->crd_inject, axf->authsize);
	else
		m_copyback((struct mbuf *) buf, crd->crd_inject,
		    axf->authsize, aalg);
	return 0;
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
    caddr_t buf, int outtype)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * We must handle the whole buffer of data at once; if the data
	 * is not all contained in the mbuf, copy it into a contiguous
	 * buffer first.
	 */
	MALLOC(data, u_int8_t *, crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	FREE(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/*
	 * Copy back the (de)compressed data.  m_copyback will extend
	 * the mbuf as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result > crd->crd_len) {
			/* Compression was useless, we lost time */
			FREE(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	COPYBACK(outtype, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		adj = result - crd->crd_len;
		if (outtype == CRYPTO_BUF_MBUF) {
			adj = result - crd->crd_len;
			m_adj((struct mbuf *)buf, adj);
		} else {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	FREE(out, M_CRYPTO_DATA);
	return 0;
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int k, error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1;	/* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	while (cri) {
		MALLOC(*swd, struct swcr_data *, sizeof(struct swcr_data),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (*swd == NULL) {
			swcr_freesession(NULL, i);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			error = txf->setkey(&((*swd)->sw_kschedule),
			    cri->cri_key, cri->cri_klen / 8);
			if (error) {
				swcr_freesession(NULL, i);
				return error;
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_SHA2_HMAC:
			if (cri->cri_klen == 256)
				axf = &auth_hash_hmac_sha2_256;
			else if (cri->cri_klen == 384)
				axf = &auth_hash_hmac_sha2_384;
			else if (cri->cri_klen == 512)
				axf = &auth_hash_hmac_sha2_512;
			else {
				swcr_freesession(NULL, i);
				return EINVAL;
			}
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160_96;
		authcommon:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_IPAD_VAL;

			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

			axf->Init((*swd)->sw_octx);
			axf->Update((*swd)->sw_octx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_OPAD_VAL;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			(*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_klen = cri->cri_klen / 8;
			bcopy(cri->cri_key, (*swd)->sw_octx, cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Final(NULL, (*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;
#ifdef notdef
		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;
#endif
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}

/*
 * Free a session.
 */
static int
swcr_freesession(void *arg, u_int64_t tid)
{
	struct swcr_data *swd;
	struct enc_xform *txf;
	struct auth_hash *axf;
	struct comp_algo *cxf;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	if (sid > swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL)
		return EINVAL;

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx)
				free(swd->sw_ictx, M_CRYPTO_DATA);
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = swd->sw_cxf;
			break;
		}

		FREE(swd, M_CRYPTO_DATA);
	}
	return 0;
}

/*
 * Process a software request.
 */
static int
swcr_process(void *arg, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;
	int type;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		type = CRYPTO_BUF_MBUF;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		type = CRYPTO_BUF_IOV;
	} else {
		type = CRYPTO_BUF_CONTIG;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}

/*
 * Initialize the driver, called from the kernel main().
 */
static void
swcr_init(void)
{
	swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE);
	if (swcr_id < 0)
		panic("Software crypto device cannot initialize!");
	crypto_register(swcr_id, CRYPTO_DES_CBC,
	    0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
#define REGISTER(alg) \
	crypto_register(swcr_id, alg, 0,0,NULL,NULL,NULL,NULL)
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA2_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_DEFLATE_COMP);
#undef REGISTER
}
SYSINIT(cryptosoft_init, SI_SUB_PSEUDO, SI_ORDER_ANY, swcr_init, NULL)
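
/*
 * Illustrative sketch only, kept under #if 0: a rough outline of how a
 * kernel consumer might hand this software driver an HMAC-SHA1 request
 * through the opencrypto framework, i.e. the path that ends up in
 * swcr_newsession() and swcr_process() above.  The wrapper name, its
 * arguments, and the choice to append the MAC at offset "len" are
 * hypothetical; the crypto_*() entry points and the cryptoini/cryptop/
 * cryptodesc fields are the standard opencrypto ones.
 */
#if 0
static int
example_sha1_hmac_request(struct mbuf *m, int len, caddr_t key,
    int klen_bits, int (*cb)(struct cryptop *))
{
	struct cryptoini cri;
	struct cryptop *crp;
	struct cryptodesc *crd;
	u_int64_t sid;
	int error;

	/* Describe the transform and create a session (software is fine). */
	bzero(&cri, sizeof(cri));
	cri.cri_alg = CRYPTO_SHA1_HMAC;
	cri.cri_klen = klen_bits;		/* key length in bits */
	cri.cri_key = key;
	error = crypto_newsession(&sid, &cri, 0);
	if (error)
		return (error);

	/* Build a request carrying a single authentication descriptor. */
	crp = crypto_getreq(1);
	if (crp == NULL) {
		crypto_freesession(sid);
		return (ENOBUFS);
	}
	crd = crp->crp_desc;
	crd->crd_alg = CRYPTO_SHA1_HMAC;
	crd->crd_skip = 0;			/* hash from the start... */
	crd->crd_len = len;			/* ...over len bytes */
	crd->crd_inject = len;			/* where to write the MAC */

	crp->crp_sid = sid;
	crp->crp_ilen = len;
	crp->crp_flags = CRYPTO_F_IMBUF;	/* crp_buf is an mbuf chain */
	crp->crp_buf = (caddr_t)m;
	crp->crp_callback = cb;			/* invoked on completion */

	/* swcr_process() services the request and calls crypto_done(). */
	return (crypto_dispatch(crp));
}
#endif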