1 /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */ 2 3 /*- 4 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) 5 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting 6 * 7 * This code was written by Angelos D. Keromytis in Athens, Greece, in 8 * February 2000. Network Security Technologies Inc. (NSTI) kindly 9 * supported the development of this code. 10 * 11 * Copyright (c) 2000, 2001 Angelos D. Keromytis 12 * Copyright (c) 2014 The FreeBSD Foundation 13 * All rights reserved. 14 * 15 * Portions of this software were developed by John-Mark Gurney 16 * under sponsorship of the FreeBSD Foundation and 17 * Rubicon Communications, LLC (Netgate). 18 * 19 * Permission to use, copy, and modify this software with or without fee 20 * is hereby granted, provided that this entire notice is included in 21 * all source code copies of any software which is or includes a copy or 22 * modification of this software. 23 * 24 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR 25 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY 26 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE 27 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR 28 * PURPOSE. 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/malloc.h> 37 #include <sys/mbuf.h> 38 #include <sys/module.h> 39 #include <sys/sysctl.h> 40 #include <sys/errno.h> 41 #include <sys/random.h> 42 #include <sys/kernel.h> 43 #include <sys/uio.h> 44 #include <sys/lock.h> 45 #include <sys/rwlock.h> 46 #include <sys/endian.h> 47 #include <sys/limits.h> 48 49 #include <crypto/blowfish/blowfish.h> 50 #include <crypto/sha1.h> 51 #include <opencrypto/rmd160.h> 52 #include <opencrypto/cast.h> 53 #include <opencrypto/skipjack.h> 54 #include <sys/md5.h> 55 56 #include <opencrypto/cryptodev.h> 57 #include <opencrypto/cryptosoft.h> 58 #include <opencrypto/xform.h> 59 60 #include <sys/kobj.h> 61 #include <sys/bus.h> 62 #include "cryptodev_if.h" 63 64 static int32_t swcr_id; 65 static struct swcr_data **swcr_sessions = NULL; 66 static u_int32_t swcr_sesnum; 67 /* Protects swcr_sessions pointer, not data. */ 68 static struct rwlock swcr_sessions_lock; 69 70 u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN]; 71 u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN]; 72 73 static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int); 74 static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int); 75 static int swcr_authenc(struct cryptop *crp); 76 static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int); 77 static int swcr_freesession(device_t dev, u_int64_t tid); 78 static int swcr_freesession_locked(device_t dev, u_int64_t tid); 79 80 /* 81 * Apply a symmetric encryption/decryption algorithm. 82 */ 83 static int 84 swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf, 85 int flags) 86 { 87 unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN]; 88 unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN]; 89 struct enc_xform *exf; 90 int i, j, k, blks, ind, count, ivlen; 91 struct uio *uio, uiolcl; 92 struct iovec iovlcl[4]; 93 struct iovec *iov; 94 int iovcnt, iovalloc; 95 int error; 96 97 error = 0; 98 99 exf = sw->sw_exf; 100 blks = exf->blocksize; 101 ivlen = exf->ivsize; 102 103 /* Check for non-padded data */ 104 if (crd->crd_len % blks) 105 return EINVAL; 106 107 if (crd->crd_alg == CRYPTO_AES_ICM && 108 (crd->crd_flags & CRD_F_IV_EXPLICIT) == 0) 109 return (EINVAL); 110 111 /* Initialize the IV */ 112 if (crd->crd_flags & CRD_F_ENCRYPT) { 113 /* IV explicitly provided ? */ 114 if (crd->crd_flags & CRD_F_IV_EXPLICIT) 115 bcopy(crd->crd_iv, iv, ivlen); 116 else 117 arc4rand(iv, ivlen, 0); 118 119 /* Do we need to write the IV */ 120 if (!(crd->crd_flags & CRD_F_IV_PRESENT)) 121 crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv); 122 123 } else { /* Decryption */ 124 /* IV explicitly provided ? */ 125 if (crd->crd_flags & CRD_F_IV_EXPLICIT) 126 bcopy(crd->crd_iv, iv, ivlen); 127 else { 128 /* Get IV off buf */ 129 crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv); 130 } 131 } 132 133 if (crd->crd_flags & CRD_F_KEY_EXPLICIT) { 134 int error; 135 136 if (sw->sw_kschedule) 137 exf->zerokey(&(sw->sw_kschedule)); 138 139 error = exf->setkey(&sw->sw_kschedule, 140 crd->crd_key, crd->crd_klen / 8); 141 if (error) 142 return (error); 143 } 144 145 iov = iovlcl; 146 iovcnt = nitems(iovlcl); 147 iovalloc = 0; 148 uio = &uiolcl; 149 if ((flags & CRYPTO_F_IMBUF) != 0) { 150 error = crypto_mbuftoiov((struct mbuf *)buf, &iov, &iovcnt, 151 &iovalloc); 152 if (error) 153 return (error); 154 uio->uio_iov = iov; 155 uio->uio_iovcnt = iovcnt; 156 } else if ((flags & CRYPTO_F_IOV) != 0) 157 uio = (struct uio *)buf; 158 else { 159 iov[0].iov_base = buf; 160 iov[0].iov_len = crd->crd_skip + crd->crd_len; 161 uio->uio_iov = iov; 162 uio->uio_iovcnt = 1; 163 } 164 165 ivp = iv; 166 167 if (exf->reinit) { 168 /* 169 * xforms that provide a reinit method perform all IV 170 * handling themselves. 171 */ 172 exf->reinit(sw->sw_kschedule, iv); 173 } 174 175 count = crd->crd_skip; 176 ind = cuio_getptr(uio, count, &k); 177 if (ind == -1) { 178 error = EINVAL; 179 goto out; 180 } 181 182 i = crd->crd_len; 183 184 while (i > 0) { 185 /* 186 * If there's insufficient data at the end of 187 * an iovec, we have to do some copying. 188 */ 189 if (uio->uio_iov[ind].iov_len < k + blks && 190 uio->uio_iov[ind].iov_len != k) { 191 cuio_copydata(uio, count, blks, blk); 192 193 /* Actual encryption/decryption */ 194 if (exf->reinit) { 195 if (crd->crd_flags & CRD_F_ENCRYPT) { 196 exf->encrypt(sw->sw_kschedule, 197 blk); 198 } else { 199 exf->decrypt(sw->sw_kschedule, 200 blk); 201 } 202 } else if (crd->crd_flags & CRD_F_ENCRYPT) { 203 /* XOR with previous block */ 204 for (j = 0; j < blks; j++) 205 blk[j] ^= ivp[j]; 206 207 exf->encrypt(sw->sw_kschedule, blk); 208 209 /* 210 * Keep encrypted block for XOR'ing 211 * with next block 212 */ 213 bcopy(blk, iv, blks); 214 ivp = iv; 215 } else { /* decrypt */ 216 /* 217 * Keep encrypted block for XOR'ing 218 * with next block 219 */ 220 nivp = (ivp == iv) ? iv2 : iv; 221 bcopy(blk, nivp, blks); 222 223 exf->decrypt(sw->sw_kschedule, blk); 224 225 /* XOR with previous block */ 226 for (j = 0; j < blks; j++) 227 blk[j] ^= ivp[j]; 228 229 ivp = nivp; 230 } 231 232 /* Copy back decrypted block */ 233 cuio_copyback(uio, count, blks, blk); 234 235 count += blks; 236 237 /* Advance pointer */ 238 ind = cuio_getptr(uio, count, &k); 239 if (ind == -1) { 240 error = EINVAL; 241 goto out; 242 } 243 244 i -= blks; 245 246 /* Could be done... */ 247 if (i == 0) 248 break; 249 } 250 251 while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) { 252 uint8_t *idat; 253 size_t nb, rem; 254 255 nb = blks; 256 rem = uio->uio_iov[ind].iov_len - k; 257 idat = (uint8_t *)uio->uio_iov[ind].iov_base + k; 258 259 if (exf->reinit) { 260 if ((crd->crd_flags & CRD_F_ENCRYPT) != 0 && 261 exf->encrypt_multi == NULL) 262 exf->encrypt(sw->sw_kschedule, 263 idat); 264 else if ((crd->crd_flags & CRD_F_ENCRYPT) != 0) { 265 nb = rounddown(rem, blks); 266 exf->encrypt_multi(sw->sw_kschedule, 267 idat, nb); 268 } else if (exf->decrypt_multi == NULL) 269 exf->decrypt(sw->sw_kschedule, 270 idat); 271 else { 272 nb = rounddown(rem, blks); 273 exf->decrypt_multi(sw->sw_kschedule, 274 idat, nb); 275 } 276 } else if (crd->crd_flags & CRD_F_ENCRYPT) { 277 /* XOR with previous block/IV */ 278 for (j = 0; j < blks; j++) 279 idat[j] ^= ivp[j]; 280 281 exf->encrypt(sw->sw_kschedule, idat); 282 ivp = idat; 283 } else { /* decrypt */ 284 /* 285 * Keep encrypted block to be used 286 * in next block's processing. 287 */ 288 nivp = (ivp == iv) ? iv2 : iv; 289 bcopy(idat, nivp, blks); 290 291 exf->decrypt(sw->sw_kschedule, idat); 292 293 /* XOR with previous block/IV */ 294 for (j = 0; j < blks; j++) 295 idat[j] ^= ivp[j]; 296 297 ivp = nivp; 298 } 299 300 count += nb; 301 k += nb; 302 i -= nb; 303 } 304 305 /* 306 * Advance to the next iov if the end of the current iov 307 * is aligned with the end of a cipher block. 308 * Note that the code is equivalent to calling: 309 * ind = cuio_getptr(uio, count, &k); 310 */ 311 if (i > 0 && k == uio->uio_iov[ind].iov_len) { 312 k = 0; 313 ind++; 314 if (ind >= uio->uio_iovcnt) { 315 error = EINVAL; 316 goto out; 317 } 318 } 319 } 320 321 out: 322 if (iovalloc) 323 free(iov, M_CRYPTO_DATA); 324 325 return (error); 326 } 327 328 static void 329 swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key, 330 int klen) 331 { 332 int k; 333 334 klen /= 8; 335 336 switch (axf->type) { 337 case CRYPTO_MD5_HMAC: 338 case CRYPTO_SHA1_HMAC: 339 case CRYPTO_SHA2_256_HMAC: 340 case CRYPTO_SHA2_384_HMAC: 341 case CRYPTO_SHA2_512_HMAC: 342 case CRYPTO_NULL_HMAC: 343 case CRYPTO_RIPEMD160_HMAC: 344 for (k = 0; k < klen; k++) 345 key[k] ^= HMAC_IPAD_VAL; 346 347 axf->Init(sw->sw_ictx); 348 axf->Update(sw->sw_ictx, key, klen); 349 axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen); 350 351 for (k = 0; k < klen; k++) 352 key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL); 353 354 axf->Init(sw->sw_octx); 355 axf->Update(sw->sw_octx, key, klen); 356 axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen); 357 358 for (k = 0; k < klen; k++) 359 key[k] ^= HMAC_OPAD_VAL; 360 break; 361 case CRYPTO_MD5_KPDK: 362 case CRYPTO_SHA1_KPDK: 363 { 364 /* 365 * We need a buffer that can hold an md5 and a sha1 result 366 * just to throw it away. 367 * What we do here is the initial part of: 368 * ALGO( key, keyfill, .. ) 369 * adding the key to sw_ictx and abusing Final() to get the 370 * "keyfill" padding. 371 * In addition we abuse the sw_octx to save the key to have 372 * it to be able to append it at the end in swcr_authcompute(). 373 */ 374 u_char buf[SHA1_RESULTLEN]; 375 376 sw->sw_klen = klen; 377 bcopy(key, sw->sw_octx, klen); 378 axf->Init(sw->sw_ictx); 379 axf->Update(sw->sw_ictx, key, klen); 380 axf->Final(buf, sw->sw_ictx); 381 break; 382 } 383 case CRYPTO_BLAKE2B: 384 case CRYPTO_BLAKE2S: 385 axf->Setkey(sw->sw_ictx, key, klen); 386 axf->Init(sw->sw_ictx); 387 break; 388 default: 389 printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d " 390 "doesn't use keys.\n", __func__, axf->type); 391 } 392 } 393 394 /* 395 * Compute keyed-hash authenticator. 396 */ 397 static int 398 swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf, 399 int flags) 400 { 401 unsigned char aalg[HASH_MAX_LEN]; 402 struct auth_hash *axf; 403 union authctx ctx; 404 int err; 405 406 if (sw->sw_ictx == 0) 407 return EINVAL; 408 409 axf = sw->sw_axf; 410 411 if (crd->crd_flags & CRD_F_KEY_EXPLICIT) 412 swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen); 413 414 bcopy(sw->sw_ictx, &ctx, axf->ctxsize); 415 416 err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len, 417 (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx); 418 if (err) 419 return err; 420 421 switch (sw->sw_alg) { 422 case CRYPTO_MD5_HMAC: 423 case CRYPTO_SHA1_HMAC: 424 case CRYPTO_SHA2_256_HMAC: 425 case CRYPTO_SHA2_384_HMAC: 426 case CRYPTO_SHA2_512_HMAC: 427 case CRYPTO_RIPEMD160_HMAC: 428 if (sw->sw_octx == NULL) 429 return EINVAL; 430 431 axf->Final(aalg, &ctx); 432 bcopy(sw->sw_octx, &ctx, axf->ctxsize); 433 axf->Update(&ctx, aalg, axf->hashsize); 434 axf->Final(aalg, &ctx); 435 break; 436 437 case CRYPTO_MD5_KPDK: 438 case CRYPTO_SHA1_KPDK: 439 /* If we have no key saved, return error. */ 440 if (sw->sw_octx == NULL) 441 return EINVAL; 442 443 /* 444 * Add the trailing copy of the key (see comment in 445 * swcr_authprepare()) after the data: 446 * ALGO( .., key, algofill ) 447 * and let Final() do the proper, natural "algofill" 448 * padding. 449 */ 450 axf->Update(&ctx, sw->sw_octx, sw->sw_klen); 451 axf->Final(aalg, &ctx); 452 break; 453 454 case CRYPTO_BLAKE2B: 455 case CRYPTO_BLAKE2S: 456 case CRYPTO_NULL_HMAC: 457 axf->Final(aalg, &ctx); 458 break; 459 } 460 461 /* Inject the authentication data */ 462 crypto_copyback(flags, buf, crd->crd_inject, 463 sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg); 464 return 0; 465 } 466 467 CTASSERT(INT_MAX <= (1ll<<39) - 256); /* GCM: plain text < 2^39-256 */ 468 CTASSERT(INT_MAX <= (uint64_t)-1); /* GCM: associated data <= 2^64-1 */ 469 470 /* 471 * Apply a combined encryption-authentication transformation 472 */ 473 static int 474 swcr_authenc(struct cryptop *crp) 475 { 476 uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))]; 477 u_char *blk = (u_char *)blkbuf; 478 u_char aalg[AALG_MAX_RESULT_LEN]; 479 u_char uaalg[AALG_MAX_RESULT_LEN]; 480 u_char iv[EALG_MAX_BLOCK_LEN]; 481 union authctx ctx; 482 struct cryptodesc *crd, *crda = NULL, *crde = NULL; 483 struct swcr_data *sw, *swa, *swe = NULL; 484 struct auth_hash *axf = NULL; 485 struct enc_xform *exf = NULL; 486 caddr_t buf = (caddr_t)crp->crp_buf; 487 uint32_t *blkp; 488 int aadlen, blksz, i, ivlen, len, iskip, oskip, r; 489 490 ivlen = blksz = iskip = oskip = 0; 491 492 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 493 for (sw = swcr_sessions[crp->crp_sid & 0xffffffff]; 494 sw && sw->sw_alg != crd->crd_alg; 495 sw = sw->sw_next) 496 ; 497 if (sw == NULL) 498 return (EINVAL); 499 500 switch (sw->sw_alg) { 501 case CRYPTO_AES_NIST_GCM_16: 502 case CRYPTO_AES_NIST_GMAC: 503 swe = sw; 504 crde = crd; 505 exf = swe->sw_exf; 506 ivlen = 12; 507 break; 508 case CRYPTO_AES_128_NIST_GMAC: 509 case CRYPTO_AES_192_NIST_GMAC: 510 case CRYPTO_AES_256_NIST_GMAC: 511 swa = sw; 512 crda = crd; 513 axf = swa->sw_axf; 514 if (swa->sw_ictx == 0) 515 return (EINVAL); 516 bcopy(swa->sw_ictx, &ctx, axf->ctxsize); 517 blksz = axf->blocksize; 518 break; 519 default: 520 return (EINVAL); 521 } 522 } 523 if (crde == NULL || crda == NULL) 524 return (EINVAL); 525 526 if (crde->crd_alg == CRYPTO_AES_NIST_GCM_16 && 527 (crde->crd_flags & CRD_F_IV_EXPLICIT) == 0) 528 return (EINVAL); 529 530 if (crde->crd_klen != crda->crd_klen) 531 return (EINVAL); 532 533 /* Initialize the IV */ 534 if (crde->crd_flags & CRD_F_ENCRYPT) { 535 /* IV explicitly provided ? */ 536 if (crde->crd_flags & CRD_F_IV_EXPLICIT) 537 bcopy(crde->crd_iv, iv, ivlen); 538 else 539 arc4rand(iv, ivlen, 0); 540 541 /* Do we need to write the IV */ 542 if (!(crde->crd_flags & CRD_F_IV_PRESENT)) 543 crypto_copyback(crp->crp_flags, buf, crde->crd_inject, 544 ivlen, iv); 545 546 } else { /* Decryption */ 547 /* IV explicitly provided ? */ 548 if (crde->crd_flags & CRD_F_IV_EXPLICIT) 549 bcopy(crde->crd_iv, iv, ivlen); 550 else { 551 /* Get IV off buf */ 552 crypto_copydata(crp->crp_flags, buf, crde->crd_inject, 553 ivlen, iv); 554 } 555 } 556 557 /* Supply MAC with IV */ 558 if (axf->Reinit) 559 axf->Reinit(&ctx, iv, ivlen); 560 561 /* Supply MAC with AAD */ 562 aadlen = crda->crd_len; 563 564 for (i = iskip; i < crda->crd_len; i += blksz) { 565 len = MIN(crda->crd_len - i, blksz - oskip); 566 crypto_copydata(crp->crp_flags, buf, crda->crd_skip + i, len, 567 blk + oskip); 568 bzero(blk + len + oskip, blksz - len - oskip); 569 axf->Update(&ctx, blk, blksz); 570 oskip = 0; /* reset initial output offset */ 571 } 572 573 if (exf->reinit) 574 exf->reinit(swe->sw_kschedule, iv); 575 576 /* Do encryption/decryption with MAC */ 577 for (i = 0; i < crde->crd_len; i += len) { 578 if (exf->encrypt_multi != NULL) { 579 len = rounddown(crde->crd_len - i, blksz); 580 if (len == 0) 581 len = blksz; 582 else 583 len = MIN(len, sizeof(blkbuf)); 584 } else 585 len = blksz; 586 len = MIN(crde->crd_len - i, len); 587 if (len < blksz) 588 bzero(blk, blksz); 589 crypto_copydata(crp->crp_flags, buf, crde->crd_skip + i, len, 590 blk); 591 if (crde->crd_flags & CRD_F_ENCRYPT) { 592 if (exf->encrypt_multi != NULL) 593 exf->encrypt_multi(swe->sw_kschedule, blk, 594 len); 595 else 596 exf->encrypt(swe->sw_kschedule, blk); 597 axf->Update(&ctx, blk, len); 598 crypto_copyback(crp->crp_flags, buf, 599 crde->crd_skip + i, len, blk); 600 } else { 601 axf->Update(&ctx, blk, len); 602 } 603 } 604 605 /* Do any required special finalization */ 606 switch (crda->crd_alg) { 607 case CRYPTO_AES_128_NIST_GMAC: 608 case CRYPTO_AES_192_NIST_GMAC: 609 case CRYPTO_AES_256_NIST_GMAC: 610 /* length block */ 611 bzero(blk, blksz); 612 blkp = (uint32_t *)blk + 1; 613 *blkp = htobe32(aadlen * 8); 614 blkp = (uint32_t *)blk + 3; 615 *blkp = htobe32(crde->crd_len * 8); 616 axf->Update(&ctx, blk, blksz); 617 break; 618 } 619 620 /* Finalize MAC */ 621 axf->Final(aalg, &ctx); 622 623 /* Validate tag */ 624 if (!(crde->crd_flags & CRD_F_ENCRYPT)) { 625 crypto_copydata(crp->crp_flags, buf, crda->crd_inject, 626 axf->hashsize, uaalg); 627 628 r = timingsafe_bcmp(aalg, uaalg, axf->hashsize); 629 if (r == 0) { 630 /* tag matches, decrypt data */ 631 for (i = 0; i < crde->crd_len; i += blksz) { 632 len = MIN(crde->crd_len - i, blksz); 633 if (len < blksz) 634 bzero(blk, blksz); 635 crypto_copydata(crp->crp_flags, buf, 636 crde->crd_skip + i, len, blk); 637 exf->decrypt(swe->sw_kschedule, blk); 638 crypto_copyback(crp->crp_flags, buf, 639 crde->crd_skip + i, len, blk); 640 } 641 } else 642 return (EBADMSG); 643 } else { 644 /* Inject the authentication data */ 645 crypto_copyback(crp->crp_flags, buf, crda->crd_inject, 646 axf->hashsize, aalg); 647 } 648 649 return (0); 650 } 651 652 /* 653 * Apply a compression/decompression algorithm 654 */ 655 static int 656 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw, 657 caddr_t buf, int flags) 658 { 659 u_int8_t *data, *out; 660 struct comp_algo *cxf; 661 int adj; 662 u_int32_t result; 663 664 cxf = sw->sw_cxf; 665 666 /* We must handle the whole buffer of data in one time 667 * then if there is not all the data in the mbuf, we must 668 * copy in a buffer. 669 */ 670 671 data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT); 672 if (data == NULL) 673 return (EINVAL); 674 crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data); 675 676 if (crd->crd_flags & CRD_F_COMP) 677 result = cxf->compress(data, crd->crd_len, &out); 678 else 679 result = cxf->decompress(data, crd->crd_len, &out); 680 681 free(data, M_CRYPTO_DATA); 682 if (result == 0) 683 return EINVAL; 684 685 /* Copy back the (de)compressed data. m_copyback is 686 * extending the mbuf as necessary. 687 */ 688 sw->sw_size = result; 689 /* Check the compressed size when doing compression */ 690 if (crd->crd_flags & CRD_F_COMP) { 691 if (result >= crd->crd_len) { 692 /* Compression was useless, we lost time */ 693 free(out, M_CRYPTO_DATA); 694 return 0; 695 } 696 } 697 698 crypto_copyback(flags, buf, crd->crd_skip, result, out); 699 if (result < crd->crd_len) { 700 adj = result - crd->crd_len; 701 if (flags & CRYPTO_F_IMBUF) { 702 adj = result - crd->crd_len; 703 m_adj((struct mbuf *)buf, adj); 704 } else if (flags & CRYPTO_F_IOV) { 705 struct uio *uio = (struct uio *)buf; 706 int ind; 707 708 adj = crd->crd_len - result; 709 ind = uio->uio_iovcnt - 1; 710 711 while (adj > 0 && ind >= 0) { 712 if (adj < uio->uio_iov[ind].iov_len) { 713 uio->uio_iov[ind].iov_len -= adj; 714 break; 715 } 716 717 adj -= uio->uio_iov[ind].iov_len; 718 uio->uio_iov[ind].iov_len = 0; 719 ind--; 720 uio->uio_iovcnt--; 721 } 722 } 723 } 724 free(out, M_CRYPTO_DATA); 725 return 0; 726 } 727 728 /* 729 * Generate a new software session. 730 */ 731 static int 732 swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri) 733 { 734 struct swcr_data **swd; 735 struct auth_hash *axf; 736 struct enc_xform *txf; 737 struct comp_algo *cxf; 738 u_int32_t i; 739 int len; 740 int error; 741 742 if (sid == NULL || cri == NULL) 743 return EINVAL; 744 745 rw_wlock(&swcr_sessions_lock); 746 if (swcr_sessions) { 747 for (i = 1; i < swcr_sesnum; i++) 748 if (swcr_sessions[i] == NULL) 749 break; 750 } else 751 i = 1; /* NB: to silence compiler warning */ 752 753 if (swcr_sessions == NULL || i == swcr_sesnum) { 754 if (swcr_sessions == NULL) { 755 i = 1; /* We leave swcr_sessions[0] empty */ 756 swcr_sesnum = CRYPTO_SW_SESSIONS; 757 } else 758 swcr_sesnum *= 2; 759 760 swd = malloc(swcr_sesnum * sizeof(struct swcr_data *), 761 M_CRYPTO_DATA, M_NOWAIT|M_ZERO); 762 if (swd == NULL) { 763 /* Reset session number */ 764 if (swcr_sesnum == CRYPTO_SW_SESSIONS) 765 swcr_sesnum = 0; 766 else 767 swcr_sesnum /= 2; 768 rw_wunlock(&swcr_sessions_lock); 769 return ENOBUFS; 770 } 771 772 /* Copy existing sessions */ 773 if (swcr_sessions != NULL) { 774 bcopy(swcr_sessions, swd, 775 (swcr_sesnum / 2) * sizeof(struct swcr_data *)); 776 free(swcr_sessions, M_CRYPTO_DATA); 777 } 778 779 swcr_sessions = swd; 780 } 781 782 rw_downgrade(&swcr_sessions_lock); 783 swd = &swcr_sessions[i]; 784 *sid = i; 785 786 while (cri) { 787 *swd = malloc(sizeof(struct swcr_data), 788 M_CRYPTO_DATA, M_NOWAIT|M_ZERO); 789 if (*swd == NULL) { 790 swcr_freesession_locked(dev, i); 791 rw_runlock(&swcr_sessions_lock); 792 return ENOBUFS; 793 } 794 795 switch (cri->cri_alg) { 796 case CRYPTO_DES_CBC: 797 txf = &enc_xform_des; 798 goto enccommon; 799 case CRYPTO_3DES_CBC: 800 txf = &enc_xform_3des; 801 goto enccommon; 802 case CRYPTO_BLF_CBC: 803 txf = &enc_xform_blf; 804 goto enccommon; 805 case CRYPTO_CAST_CBC: 806 txf = &enc_xform_cast5; 807 goto enccommon; 808 case CRYPTO_SKIPJACK_CBC: 809 txf = &enc_xform_skipjack; 810 goto enccommon; 811 case CRYPTO_RIJNDAEL128_CBC: 812 txf = &enc_xform_rijndael128; 813 goto enccommon; 814 case CRYPTO_AES_XTS: 815 txf = &enc_xform_aes_xts; 816 goto enccommon; 817 case CRYPTO_AES_ICM: 818 txf = &enc_xform_aes_icm; 819 goto enccommon; 820 case CRYPTO_AES_NIST_GCM_16: 821 txf = &enc_xform_aes_nist_gcm; 822 goto enccommon; 823 case CRYPTO_AES_NIST_GMAC: 824 txf = &enc_xform_aes_nist_gmac; 825 (*swd)->sw_exf = txf; 826 break; 827 case CRYPTO_CAMELLIA_CBC: 828 txf = &enc_xform_camellia; 829 goto enccommon; 830 case CRYPTO_NULL_CBC: 831 txf = &enc_xform_null; 832 goto enccommon; 833 case CRYPTO_CHACHA20: 834 txf = &enc_xform_chacha20; 835 goto enccommon; 836 enccommon: 837 if (cri->cri_key != NULL) { 838 error = txf->setkey(&((*swd)->sw_kschedule), 839 cri->cri_key, cri->cri_klen / 8); 840 if (error) { 841 swcr_freesession_locked(dev, i); 842 rw_runlock(&swcr_sessions_lock); 843 return error; 844 } 845 } 846 (*swd)->sw_exf = txf; 847 break; 848 849 case CRYPTO_MD5_HMAC: 850 axf = &auth_hash_hmac_md5; 851 goto authcommon; 852 case CRYPTO_SHA1_HMAC: 853 axf = &auth_hash_hmac_sha1; 854 goto authcommon; 855 case CRYPTO_SHA2_256_HMAC: 856 axf = &auth_hash_hmac_sha2_256; 857 goto authcommon; 858 case CRYPTO_SHA2_384_HMAC: 859 axf = &auth_hash_hmac_sha2_384; 860 goto authcommon; 861 case CRYPTO_SHA2_512_HMAC: 862 axf = &auth_hash_hmac_sha2_512; 863 goto authcommon; 864 case CRYPTO_NULL_HMAC: 865 axf = &auth_hash_null; 866 goto authcommon; 867 case CRYPTO_RIPEMD160_HMAC: 868 axf = &auth_hash_hmac_ripemd_160; 869 authcommon: 870 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, 871 M_NOWAIT); 872 if ((*swd)->sw_ictx == NULL) { 873 swcr_freesession_locked(dev, i); 874 rw_runlock(&swcr_sessions_lock); 875 return ENOBUFS; 876 } 877 878 (*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA, 879 M_NOWAIT); 880 if ((*swd)->sw_octx == NULL) { 881 swcr_freesession_locked(dev, i); 882 rw_runlock(&swcr_sessions_lock); 883 return ENOBUFS; 884 } 885 886 if (cri->cri_key != NULL) { 887 swcr_authprepare(axf, *swd, cri->cri_key, 888 cri->cri_klen); 889 } 890 891 (*swd)->sw_mlen = cri->cri_mlen; 892 (*swd)->sw_axf = axf; 893 break; 894 895 case CRYPTO_MD5_KPDK: 896 axf = &auth_hash_key_md5; 897 goto auth2common; 898 899 case CRYPTO_SHA1_KPDK: 900 axf = &auth_hash_key_sha1; 901 auth2common: 902 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, 903 M_NOWAIT); 904 if ((*swd)->sw_ictx == NULL) { 905 swcr_freesession_locked(dev, i); 906 rw_runlock(&swcr_sessions_lock); 907 return ENOBUFS; 908 } 909 910 (*swd)->sw_octx = malloc(cri->cri_klen / 8, 911 M_CRYPTO_DATA, M_NOWAIT); 912 if ((*swd)->sw_octx == NULL) { 913 swcr_freesession_locked(dev, i); 914 rw_runlock(&swcr_sessions_lock); 915 return ENOBUFS; 916 } 917 918 /* Store the key so we can "append" it to the payload */ 919 if (cri->cri_key != NULL) { 920 swcr_authprepare(axf, *swd, cri->cri_key, 921 cri->cri_klen); 922 } 923 924 (*swd)->sw_mlen = cri->cri_mlen; 925 (*swd)->sw_axf = axf; 926 break; 927 #ifdef notdef 928 case CRYPTO_MD5: 929 axf = &auth_hash_md5; 930 goto auth3common; 931 932 case CRYPTO_SHA1: 933 axf = &auth_hash_sha1; 934 auth3common: 935 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, 936 M_NOWAIT); 937 if ((*swd)->sw_ictx == NULL) { 938 swcr_freesession_locked(dev, i); 939 rw_runlock(&swcr_sessions_lock); 940 return ENOBUFS; 941 } 942 943 axf->Init((*swd)->sw_ictx); 944 (*swd)->sw_mlen = cri->cri_mlen; 945 (*swd)->sw_axf = axf; 946 break; 947 #endif 948 949 case CRYPTO_AES_128_NIST_GMAC: 950 axf = &auth_hash_nist_gmac_aes_128; 951 goto auth4common; 952 953 case CRYPTO_AES_192_NIST_GMAC: 954 axf = &auth_hash_nist_gmac_aes_192; 955 goto auth4common; 956 957 case CRYPTO_AES_256_NIST_GMAC: 958 axf = &auth_hash_nist_gmac_aes_256; 959 auth4common: 960 len = cri->cri_klen / 8; 961 if (len != 16 && len != 24 && len != 32) { 962 swcr_freesession_locked(dev, i); 963 rw_runlock(&swcr_sessions_lock); 964 return EINVAL; 965 } 966 967 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, 968 M_NOWAIT); 969 if ((*swd)->sw_ictx == NULL) { 970 swcr_freesession_locked(dev, i); 971 rw_runlock(&swcr_sessions_lock); 972 return ENOBUFS; 973 } 974 axf->Init((*swd)->sw_ictx); 975 axf->Setkey((*swd)->sw_ictx, cri->cri_key, len); 976 (*swd)->sw_axf = axf; 977 break; 978 979 case CRYPTO_BLAKE2B: 980 axf = &auth_hash_blake2b; 981 goto auth5common; 982 case CRYPTO_BLAKE2S: 983 axf = &auth_hash_blake2s; 984 auth5common: 985 (*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, 986 M_NOWAIT); 987 if ((*swd)->sw_ictx == NULL) { 988 swcr_freesession_locked(dev, i); 989 rw_runlock(&swcr_sessions_lock); 990 return ENOBUFS; 991 } 992 axf->Setkey((*swd)->sw_ictx, cri->cri_key, 993 cri->cri_klen / 8); 994 axf->Init((*swd)->sw_ictx); 995 (*swd)->sw_axf = axf; 996 break; 997 998 case CRYPTO_DEFLATE_COMP: 999 cxf = &comp_algo_deflate; 1000 (*swd)->sw_cxf = cxf; 1001 break; 1002 default: 1003 swcr_freesession_locked(dev, i); 1004 rw_runlock(&swcr_sessions_lock); 1005 return EINVAL; 1006 } 1007 1008 (*swd)->sw_alg = cri->cri_alg; 1009 cri = cri->cri_next; 1010 swd = &((*swd)->sw_next); 1011 } 1012 rw_runlock(&swcr_sessions_lock); 1013 return 0; 1014 } 1015 1016 static int 1017 swcr_freesession(device_t dev, u_int64_t tid) 1018 { 1019 int error; 1020 1021 rw_rlock(&swcr_sessions_lock); 1022 error = swcr_freesession_locked(dev, tid); 1023 rw_runlock(&swcr_sessions_lock); 1024 return error; 1025 } 1026 1027 /* 1028 * Free a session. 1029 */ 1030 static int 1031 swcr_freesession_locked(device_t dev, u_int64_t tid) 1032 { 1033 struct swcr_data *swd; 1034 struct enc_xform *txf; 1035 struct auth_hash *axf; 1036 u_int32_t sid = CRYPTO_SESID2LID(tid); 1037 1038 if (sid > swcr_sesnum || swcr_sessions == NULL || 1039 swcr_sessions[sid] == NULL) 1040 return EINVAL; 1041 1042 /* Silently accept and return */ 1043 if (sid == 0) 1044 return 0; 1045 1046 while ((swd = swcr_sessions[sid]) != NULL) { 1047 swcr_sessions[sid] = swd->sw_next; 1048 1049 switch (swd->sw_alg) { 1050 case CRYPTO_DES_CBC: 1051 case CRYPTO_3DES_CBC: 1052 case CRYPTO_BLF_CBC: 1053 case CRYPTO_CAST_CBC: 1054 case CRYPTO_SKIPJACK_CBC: 1055 case CRYPTO_RIJNDAEL128_CBC: 1056 case CRYPTO_AES_XTS: 1057 case CRYPTO_AES_ICM: 1058 case CRYPTO_AES_NIST_GCM_16: 1059 case CRYPTO_AES_NIST_GMAC: 1060 case CRYPTO_CAMELLIA_CBC: 1061 case CRYPTO_NULL_CBC: 1062 case CRYPTO_CHACHA20: 1063 txf = swd->sw_exf; 1064 1065 if (swd->sw_kschedule) 1066 txf->zerokey(&(swd->sw_kschedule)); 1067 break; 1068 1069 case CRYPTO_MD5_HMAC: 1070 case CRYPTO_SHA1_HMAC: 1071 case CRYPTO_SHA2_256_HMAC: 1072 case CRYPTO_SHA2_384_HMAC: 1073 case CRYPTO_SHA2_512_HMAC: 1074 case CRYPTO_RIPEMD160_HMAC: 1075 case CRYPTO_NULL_HMAC: 1076 axf = swd->sw_axf; 1077 1078 if (swd->sw_ictx) { 1079 bzero(swd->sw_ictx, axf->ctxsize); 1080 free(swd->sw_ictx, M_CRYPTO_DATA); 1081 } 1082 if (swd->sw_octx) { 1083 bzero(swd->sw_octx, axf->ctxsize); 1084 free(swd->sw_octx, M_CRYPTO_DATA); 1085 } 1086 break; 1087 1088 case CRYPTO_MD5_KPDK: 1089 case CRYPTO_SHA1_KPDK: 1090 axf = swd->sw_axf; 1091 1092 if (swd->sw_ictx) { 1093 bzero(swd->sw_ictx, axf->ctxsize); 1094 free(swd->sw_ictx, M_CRYPTO_DATA); 1095 } 1096 if (swd->sw_octx) { 1097 bzero(swd->sw_octx, swd->sw_klen); 1098 free(swd->sw_octx, M_CRYPTO_DATA); 1099 } 1100 break; 1101 1102 case CRYPTO_BLAKE2B: 1103 case CRYPTO_BLAKE2S: 1104 case CRYPTO_MD5: 1105 case CRYPTO_SHA1: 1106 axf = swd->sw_axf; 1107 1108 if (swd->sw_ictx) { 1109 explicit_bzero(swd->sw_ictx, axf->ctxsize); 1110 free(swd->sw_ictx, M_CRYPTO_DATA); 1111 } 1112 break; 1113 1114 case CRYPTO_DEFLATE_COMP: 1115 /* Nothing to do */ 1116 break; 1117 } 1118 1119 free(swd, M_CRYPTO_DATA); 1120 } 1121 return 0; 1122 } 1123 1124 /* 1125 * Process a software request. 1126 */ 1127 static int 1128 swcr_process(device_t dev, struct cryptop *crp, int hint) 1129 { 1130 struct cryptodesc *crd; 1131 struct swcr_data *sw; 1132 u_int32_t lid; 1133 1134 /* Sanity check */ 1135 if (crp == NULL) 1136 return EINVAL; 1137 1138 if (crp->crp_desc == NULL || crp->crp_buf == NULL) { 1139 crp->crp_etype = EINVAL; 1140 goto done; 1141 } 1142 1143 lid = CRYPTO_SESID2LID(crp->crp_sid); 1144 rw_rlock(&swcr_sessions_lock); 1145 if (swcr_sessions == NULL || lid >= swcr_sesnum || lid == 0 || 1146 swcr_sessions[lid] == NULL) { 1147 rw_runlock(&swcr_sessions_lock); 1148 crp->crp_etype = ENOENT; 1149 goto done; 1150 } 1151 rw_runlock(&swcr_sessions_lock); 1152 1153 /* Go through crypto descriptors, processing as we go */ 1154 for (crd = crp->crp_desc; crd; crd = crd->crd_next) { 1155 /* 1156 * Find the crypto context. 1157 * 1158 * XXX Note that the logic here prevents us from having 1159 * XXX the same algorithm multiple times in a session 1160 * XXX (or rather, we can but it won't give us the right 1161 * XXX results). To do that, we'd need some way of differentiating 1162 * XXX between the various instances of an algorithm (so we can 1163 * XXX locate the correct crypto context). 1164 */ 1165 rw_rlock(&swcr_sessions_lock); 1166 if (swcr_sessions == NULL) { 1167 rw_runlock(&swcr_sessions_lock); 1168 crp->crp_etype = ENOENT; 1169 goto done; 1170 } 1171 for (sw = swcr_sessions[lid]; 1172 sw && sw->sw_alg != crd->crd_alg; 1173 sw = sw->sw_next) 1174 ; 1175 rw_runlock(&swcr_sessions_lock); 1176 1177 /* No such context ? */ 1178 if (sw == NULL) { 1179 crp->crp_etype = EINVAL; 1180 goto done; 1181 } 1182 switch (sw->sw_alg) { 1183 case CRYPTO_DES_CBC: 1184 case CRYPTO_3DES_CBC: 1185 case CRYPTO_BLF_CBC: 1186 case CRYPTO_CAST_CBC: 1187 case CRYPTO_SKIPJACK_CBC: 1188 case CRYPTO_RIJNDAEL128_CBC: 1189 case CRYPTO_AES_XTS: 1190 case CRYPTO_AES_ICM: 1191 case CRYPTO_CAMELLIA_CBC: 1192 case CRYPTO_CHACHA20: 1193 if ((crp->crp_etype = swcr_encdec(crd, sw, 1194 crp->crp_buf, crp->crp_flags)) != 0) 1195 goto done; 1196 break; 1197 case CRYPTO_NULL_CBC: 1198 crp->crp_etype = 0; 1199 break; 1200 case CRYPTO_MD5_HMAC: 1201 case CRYPTO_SHA1_HMAC: 1202 case CRYPTO_SHA2_256_HMAC: 1203 case CRYPTO_SHA2_384_HMAC: 1204 case CRYPTO_SHA2_512_HMAC: 1205 case CRYPTO_RIPEMD160_HMAC: 1206 case CRYPTO_NULL_HMAC: 1207 case CRYPTO_MD5_KPDK: 1208 case CRYPTO_SHA1_KPDK: 1209 case CRYPTO_MD5: 1210 case CRYPTO_SHA1: 1211 case CRYPTO_BLAKE2B: 1212 case CRYPTO_BLAKE2S: 1213 if ((crp->crp_etype = swcr_authcompute(crd, sw, 1214 crp->crp_buf, crp->crp_flags)) != 0) 1215 goto done; 1216 break; 1217 1218 case CRYPTO_AES_NIST_GCM_16: 1219 case CRYPTO_AES_NIST_GMAC: 1220 case CRYPTO_AES_128_NIST_GMAC: 1221 case CRYPTO_AES_192_NIST_GMAC: 1222 case CRYPTO_AES_256_NIST_GMAC: 1223 crp->crp_etype = swcr_authenc(crp); 1224 goto done; 1225 1226 case CRYPTO_DEFLATE_COMP: 1227 if ((crp->crp_etype = swcr_compdec(crd, sw, 1228 crp->crp_buf, crp->crp_flags)) != 0) 1229 goto done; 1230 else 1231 crp->crp_olen = (int)sw->sw_size; 1232 break; 1233 1234 default: 1235 /* Unknown/unsupported algorithm */ 1236 crp->crp_etype = EINVAL; 1237 goto done; 1238 } 1239 } 1240 1241 done: 1242 crypto_done(crp); 1243 return 0; 1244 } 1245 1246 static void 1247 swcr_identify(driver_t *drv, device_t parent) 1248 { 1249 /* NB: order 10 is so we get attached after h/w devices */ 1250 if (device_find_child(parent, "cryptosoft", -1) == NULL && 1251 BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0) 1252 panic("cryptosoft: could not attach"); 1253 } 1254 1255 static int 1256 swcr_probe(device_t dev) 1257 { 1258 device_set_desc(dev, "software crypto"); 1259 return (BUS_PROBE_NOWILDCARD); 1260 } 1261 1262 static int 1263 swcr_attach(device_t dev) 1264 { 1265 rw_init(&swcr_sessions_lock, "swcr_sessions_lock"); 1266 memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN); 1267 memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN); 1268 1269 swcr_id = crypto_get_driverid(dev, 1270 CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC); 1271 if (swcr_id < 0) { 1272 device_printf(dev, "cannot initialize!"); 1273 return ENOMEM; 1274 } 1275 #define REGISTER(alg) \ 1276 crypto_register(swcr_id, alg, 0,0) 1277 REGISTER(CRYPTO_DES_CBC); 1278 REGISTER(CRYPTO_3DES_CBC); 1279 REGISTER(CRYPTO_BLF_CBC); 1280 REGISTER(CRYPTO_CAST_CBC); 1281 REGISTER(CRYPTO_SKIPJACK_CBC); 1282 REGISTER(CRYPTO_NULL_CBC); 1283 REGISTER(CRYPTO_MD5_HMAC); 1284 REGISTER(CRYPTO_SHA1_HMAC); 1285 REGISTER(CRYPTO_SHA2_256_HMAC); 1286 REGISTER(CRYPTO_SHA2_384_HMAC); 1287 REGISTER(CRYPTO_SHA2_512_HMAC); 1288 REGISTER(CRYPTO_RIPEMD160_HMAC); 1289 REGISTER(CRYPTO_NULL_HMAC); 1290 REGISTER(CRYPTO_MD5_KPDK); 1291 REGISTER(CRYPTO_SHA1_KPDK); 1292 REGISTER(CRYPTO_MD5); 1293 REGISTER(CRYPTO_SHA1); 1294 REGISTER(CRYPTO_RIJNDAEL128_CBC); 1295 REGISTER(CRYPTO_AES_XTS); 1296 REGISTER(CRYPTO_AES_ICM); 1297 REGISTER(CRYPTO_AES_NIST_GCM_16); 1298 REGISTER(CRYPTO_AES_NIST_GMAC); 1299 REGISTER(CRYPTO_AES_128_NIST_GMAC); 1300 REGISTER(CRYPTO_AES_192_NIST_GMAC); 1301 REGISTER(CRYPTO_AES_256_NIST_GMAC); 1302 REGISTER(CRYPTO_CAMELLIA_CBC); 1303 REGISTER(CRYPTO_DEFLATE_COMP); 1304 REGISTER(CRYPTO_BLAKE2B); 1305 REGISTER(CRYPTO_BLAKE2S); 1306 REGISTER(CRYPTO_CHACHA20); 1307 #undef REGISTER 1308 1309 return 0; 1310 } 1311 1312 static int 1313 swcr_detach(device_t dev) 1314 { 1315 crypto_unregister_all(swcr_id); 1316 rw_wlock(&swcr_sessions_lock); 1317 free(swcr_sessions, M_CRYPTO_DATA); 1318 swcr_sessions = NULL; 1319 rw_wunlock(&swcr_sessions_lock); 1320 rw_destroy(&swcr_sessions_lock); 1321 return 0; 1322 } 1323 1324 static device_method_t swcr_methods[] = { 1325 DEVMETHOD(device_identify, swcr_identify), 1326 DEVMETHOD(device_probe, swcr_probe), 1327 DEVMETHOD(device_attach, swcr_attach), 1328 DEVMETHOD(device_detach, swcr_detach), 1329 1330 DEVMETHOD(cryptodev_newsession, swcr_newsession), 1331 DEVMETHOD(cryptodev_freesession,swcr_freesession), 1332 DEVMETHOD(cryptodev_process, swcr_process), 1333 1334 {0, 0}, 1335 }; 1336 1337 static driver_t swcr_driver = { 1338 "cryptosoft", 1339 swcr_methods, 1340 0, /* NB: no softc */ 1341 }; 1342 static devclass_t swcr_devclass; 1343 1344 /* 1345 * NB: We explicitly reference the crypto module so we 1346 * get the necessary ordering when built as a loadable 1347 * module. This is required because we bundle the crypto 1348 * module code together with the cryptosoft driver (otherwise 1349 * normal module dependencies would handle things). 1350 */ 1351 extern int crypto_modevent(struct module *, int, void *); 1352 /* XXX where to attach */ 1353 DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent,0); 1354 MODULE_VERSION(cryptosoft, 1); 1355 MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1); 1356