/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>
#include <sys/md5.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

static	int32_t swcr_id;
static	struct swcr_data **swcr_sessions = NULL;
static	u_int32_t swcr_sesnum;
/* Protects swcr_sessions pointer, not data. */
static	struct rwlock swcr_sessions_lock;

u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];

static	int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_authenc(struct cryptop *crp);
static	int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_freesession(device_t dev, u_int64_t tid);
static	int swcr_freesession_locked(device_t dev, u_int64_t tid);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, j, k, blks, ind, count, ivlen;
	struct uio *uio, uiolcl;
	struct iovec iovlcl[4];
	struct iovec *iov;
	int iovcnt, iovalloc;
	int error;

	error = 0;

	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	if (crd->crd_alg == CRYPTO_AES_ICM &&
	    (crd->crd_flags & CRD_F_IV_EXPLICIT) == 0)
		return (EINVAL);

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			arc4rand(iv, ivlen, 0);

		/* Do we need to write the IV? */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv);
		}
	}

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		int error;

		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));

		error = exf->setkey(&sw->sw_kschedule,
		    crd->crd_key, crd->crd_klen / 8);
		if (error)
			return (error);
	}

	iov = iovlcl;
	iovcnt = nitems(iovlcl);
	iovalloc = 0;
	uio = &uiolcl;
	if ((flags & CRYPTO_F_IMBUF) != 0) {
		error = crypto_mbuftoiov((struct mbuf *)buf, &iov, &iovcnt,
		    &iovalloc);
		if (error)
			return (error);
		uio->uio_iov = iov;
		uio->uio_iovcnt = iovcnt;
	} else if ((flags & CRYPTO_F_IOV) != 0)
		uio = (struct uio *)buf;
	else {
		iov[0].iov_base = buf;
		iov[0].iov_len = crd->crd_skip + crd->crd_len;
		uio->uio_iov = iov;
		uio->uio_iovcnt = 1;
	}

	ivp = iv;

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(sw->sw_kschedule, iv);
	}

	count = crd->crd_skip;
	ind = cuio_getptr(uio, count, &k);
	if (ind == -1) {
		error = EINVAL;
		goto out;
	}

	i = crd->crd_len;

	while (i > 0) {
		/*
		 * If there's insufficient data at the end of
		 * an iovec, we have to do some copying.
		 */
		if (uio->uio_iov[ind].iov_len < k + blks &&
		    uio->uio_iov[ind].iov_len != k) {
			cuio_copydata(uio, count, blks, blk);

			/* Actual encryption/decryption */
			if (exf->reinit) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(sw->sw_kschedule,
					    blk);
				} else {
					exf->decrypt(sw->sw_kschedule,
					    blk);
				}
			} else if (crd->crd_flags & CRD_F_ENCRYPT) {
				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, blk);

				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				bcopy(blk, iv, blks);
				ivp = iv;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(blk, nivp, blks);

				exf->decrypt(sw->sw_kschedule, blk);

				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				ivp = nivp;
			}

			/* Copy back decrypted block */
			cuio_copyback(uio, count, blks, blk);

			count += blks;

			/* Advance pointer */
			ind = cuio_getptr(uio, count, &k);
			if (ind == -1) {
				error = EINVAL;
				goto out;
			}

			i -= blks;

			/* Could be done... */
			if (i == 0)
				break;
		}

		while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) {
			uint8_t *idat;
			size_t nb, rem;

			nb = blks;
			rem = MIN((size_t)i,
			    uio->uio_iov[ind].iov_len - (size_t)k);
			idat = (uint8_t *)uio->uio_iov[ind].iov_base + k;

			if (exf->reinit) {
				if ((crd->crd_flags & CRD_F_ENCRYPT) != 0 &&
				    exf->encrypt_multi == NULL)
					exf->encrypt(sw->sw_kschedule,
					    idat);
				else if ((crd->crd_flags & CRD_F_ENCRYPT) != 0) {
					nb = rounddown(rem, blks);
					exf->encrypt_multi(sw->sw_kschedule,
					    idat, nb);
				} else if (exf->decrypt_multi == NULL)
					exf->decrypt(sw->sw_kschedule,
					    idat);
				else {
					nb = rounddown(rem, blks);
					exf->decrypt_multi(sw->sw_kschedule,
					    idat, nb);
				}
			} else if (crd->crd_flags & CRD_F_ENCRYPT) {
				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, idat);
				ivp = idat;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block to be used
				 * in next block's processing.
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(idat, nivp, blks);

				exf->decrypt(sw->sw_kschedule, idat);

				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				ivp = nivp;
			}

			count += nb;
			k += nb;
			i -= nb;
		}

		/*
		 * Advance to the next iov if the end of the current iov
		 * is aligned with the end of a cipher block.
		 * Note that the code is equivalent to calling:
		 * ind = cuio_getptr(uio, count, &k);
		 */
		if (i > 0 && k == uio->uio_iov[ind].iov_len) {
			k = 0;
			ind++;
			if (ind >= uio->uio_iovcnt) {
				error = EINVAL;
				goto out;
			}
		}
	}

out:
	if (iovalloc)
		free(iov, M_CRYPTO_DATA);

	return (error);
}
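/*
 * Illustrative sketch (not compiled): the CBC chaining that swcr_encdec()
 * implements across scattered iovecs, shown over a single contiguous
 * buffer.  "block_encrypt" is a hypothetical single-block cipher stand-in
 * for exf->encrypt(); it is not part of this driver.
 */
#ifdef notdef
static void
cbc_encrypt_contig(void (*block_encrypt)(void *, u_char *), void *ks,
    u_char *iv, u_char *data, size_t len, size_t blks)
{
	u_char *ivp = iv;
	size_t off, j;

	for (off = 0; off < len; off += blks) {
		/* XOR plaintext block with previous ciphertext (or IV). */
		for (j = 0; j < blks; j++)
			data[off + j] ^= ivp[j];
		block_encrypt(ks, data + off);
		/* This ciphertext block chains into the next one. */
		ivp = data + off;
	}
}
#endif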
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
    int klen)
{
	int k;

	klen /= 8;

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/*
		 * We need a buffer that can hold an MD5 or a SHA1 result,
		 * just to throw it away.
		 * What we do here is the initial part of:
		 *   ALGO( key, keyfill, .. )
		 * adding the key to sw_ictx and abusing Final() to get the
		 * "keyfill" padding.
		 * In addition we abuse sw_octx to save the key, so that we
		 * can append it at the end in swcr_authcompute().
		 */
		u_char buf[SHA1_RESULTLEN];

		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		axf->Setkey(sw->sw_ictx, key, klen);
		axf->Init(sw->sw_ictx);
		break;
	default:
		printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
		    "doesn't use keys.\n", __func__, axf->type);
	}
}
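/*
 * Illustrative sketch (not compiled): how the precomputed HMAC contexts
 * built above are consumed per request, following
 * HMAC(K,m) = H((K^opad) || H((K^ipad) || m)); this is what
 * swcr_authcompute() below does over the request buffer.  hash_update()
 * and hash_final() are hypothetical stand-ins for the auth_hash
 * Update/Final methods; only context copies and the message itself are
 * hashed per request.
 */
#ifdef notdef
static void
hmac_finish(const void *ictx, const void *octx, size_t ctxsize,
    const u_char *m, size_t mlen, u_char *mac, size_t hashsize)
{
	union authctx ctx;

	bcopy(ictx, &ctx, ctxsize);	/* resume H((K^ipad) || ... */
	hash_update(&ctx, m, mlen);
	hash_final(mac, &ctx);		/* inner digest */

	bcopy(octx, &ctx, ctxsize);	/* resume H((K^opad) || ... */
	hash_update(&ctx, mac, hashsize);
	hash_final(mac, &ctx);		/* outer digest = HMAC */
}
#endif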
/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == NULL)
		return EINVAL;

	axf = sw->sw_axf;

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		/* If we have no key saved, return error. */
		if (sw->sw_octx == NULL)
			return EINVAL;

		/*
		 * Add the trailing copy of the key (see comment in
		 * swcr_authprepare()) after the data:
		 *   ALGO( .., key, algofill )
		 * and let Final() do the proper, natural "algofill"
		 * padding.
		 */
		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}
CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */

/*
 * Apply a combined encryption-authentication transformation.
 */
static int
swcr_authenc(struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe = NULL;
	struct auth_hash *axf = NULL;
	struct enc_xform *exf = NULL;
	caddr_t buf = (caddr_t)crp->crp_buf;
	uint32_t *blkp;
	int aadlen, blksz, i, ivlen, len, iskip, oskip, r;

	ivlen = blksz = iskip = oskip = 0;

	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_NIST_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = 12;
			break;
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == NULL)
				return (EINVAL);
			bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
			blksz = axf->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	if (crde == NULL || crda == NULL)
		return (EINVAL);

	if (crde->crd_alg == CRYPTO_AES_NIST_GCM_16 &&
	    (crde->crd_flags & CRD_F_IV_EXPLICIT) == 0)
		return (EINVAL);

	if (crde->crd_klen != crda->crd_klen)
		return (EINVAL);

	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else
			arc4rand(iv, ivlen, 0);

		/* Do we need to write the IV? */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(crp->crp_flags, buf, crde->crd_inject,
			    ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(crp->crp_flags, buf, crde->crd_inject,
			    ivlen, iv);
		}
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	aadlen = crda->crd_len;

	for (i = iskip; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz - oskip);
		crypto_copydata(crp->crp_flags, buf, crda->crd_skip + i, len,
		    blk + oskip);
		bzero(blk + len + oskip, blksz - len - oskip);
		axf->Update(&ctx, blk, blksz);
		oskip = 0; /* reset initial output offset */
	}

	if (exf->reinit)
		exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crde->crd_len; i += len) {
		if (exf->encrypt_multi != NULL) {
			len = rounddown(crde->crd_len - i, blksz);
			if (len == 0)
				len = blksz;
			else
				len = MIN(len, sizeof(blkbuf));
		} else
			len = blksz;
		len = MIN(crde->crd_len - i, len);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crp->crp_flags, buf, crde->crd_skip + i, len,
		    blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			if (exf->encrypt_multi != NULL)
				exf->encrypt_multi(swe->sw_kschedule, blk,
				    len);
			else
				exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
			crypto_copyback(crp->crp_flags, buf,
			    crde->crd_skip + i, len, blk);
		} else {
			axf->Update(&ctx, blk, len);
		}
	}

	/* Do any required special finalization */
	switch (crda->crd_alg) {
	case CRYPTO_AES_128_NIST_GMAC:
	case CRYPTO_AES_192_NIST_GMAC:
	case CRYPTO_AES_256_NIST_GMAC:
		/* length block */
		bzero(blk, blksz);
		blkp = (uint32_t *)blk + 1;
		*blkp = htobe32(aadlen * 8);
		blkp = (uint32_t *)blk + 3;
		*blkp = htobe32(crde->crd_len * 8);
		axf->Update(&ctx, blk, blksz);
		break;
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
		crypto_copydata(crp->crp_flags, buf, crda->crd_inject,
		    axf->hashsize, uaalg);

		r = timingsafe_bcmp(aalg, uaalg, axf->hashsize);
		if (r == 0) {
			/* tag matches, decrypt data */
			for (i = 0; i < crde->crd_len; i += blksz) {
				len = MIN(crde->crd_len - i, blksz);
				if (len < blksz)
					bzero(blk, blksz);
				crypto_copydata(crp->crp_flags, buf,
				    crde->crd_skip + i, len, blk);
				exf->decrypt(swe->sw_kschedule, blk);
				crypto_copyback(crp->crp_flags, buf,
				    crde->crd_skip + i, len, blk);
			}
		} else
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp->crp_flags, buf, crda->crd_inject,
		    axf->hashsize, aalg);
	}

	return (0);
}
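/*
 * Illustrative sketch (not compiled): the GHASH "length block" built in
 * the GMAC finalization above is two 64-bit big-endian bit counts,
 * len(AAD) || len(C).  Writing htobe32() into 32-bit words 1 and 3 of
 * the zeroed block fills only the low halves, which suffices because
 * the CTASSERTs above bound both lengths well below 2^32 bits.  A
 * full-width equivalent would look like this hypothetical helper:
 */
#ifdef notdef
static void
gmac_length_block(u_char *blk, uint64_t aadlen, uint64_t ctlen)
{
	uint64_t *blkp = (uint64_t *)blk;

	blkp[0] = htobe64(aadlen * 8);	/* AAD length in bits */
	blkp[1] = htobe64(ctlen * 8);	/* ciphertext length in bits */
}
#endif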
/*
 * Apply a compression/decompression algorithm.
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
    caddr_t buf, int flags)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * We must handle the whole buffer of data in one go, so if the
	 * data is not contiguous in the mbuf, copy it into a temporary
	 * buffer first.
	 */
	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/*
	 * Copy back the (de)compressed data.  m_copyback() will extend
	 * the mbuf as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result >= crd->crd_len) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	crypto_copyback(flags, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		adj = result - crd->crd_len;
		if (flags & CRYPTO_F_IMBUF) {
			/* adj is negative: trim that many bytes off the tail. */
			m_adj((struct mbuf *)buf, adj);
		} else if (flags & CRYPTO_F_IOV) {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}
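/*
 * Worked example for swcr_compdec()'s uio trimming above (illustrative
 * numbers): with crd_len = 100 and result = 60, adj = 40 bytes must come
 * off the tail.  If the last iovec holds 25 bytes, it is zeroed and
 * dropped (uio_iovcnt--), and the remaining 15 bytes are then shaved off
 * the new last iovec's iov_len, at which point the loop breaks.
 */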
/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int len;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	rw_wlock(&swcr_sessions_lock);
	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			rw_wunlock(&swcr_sessions_lock);
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions != NULL) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	rw_downgrade(&swcr_sessions_lock);
	swd = &swcr_sessions[i];
	*sid = i;

	while (cri) {
		*swd = malloc(sizeof(struct swcr_data),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (*swd == NULL) {
			swcr_freesession_locked(dev, i);
			rw_runlock(&swcr_sessions_lock);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_AES_XTS:
			txf = &enc_xform_aes_xts;
			goto enccommon;
		case CRYPTO_AES_ICM:
			txf = &enc_xform_aes_icm;
			goto enccommon;
		case CRYPTO_AES_NIST_GCM_16:
			txf = &enc_xform_aes_nist_gcm;
			goto enccommon;
		case CRYPTO_AES_NIST_GMAC:
			txf = &enc_xform_aes_nist_gmac;
			(*swd)->sw_exf = txf;
			break;
		case CRYPTO_CAMELLIA_CBC:
			txf = &enc_xform_camellia;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		case CRYPTO_CHACHA20:
			txf = &enc_xform_chacha20;
			goto enccommon;
		enccommon:
			if (cri->cri_key != NULL) {
				error = txf->setkey(&((*swd)->sw_kschedule),
				    cri->cri_key, cri->cri_klen / 8);
				if (error) {
					swcr_freesession_locked(dev, i);
					rw_runlock(&swcr_sessions_lock);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(cri->cri_klen / 8,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#ifdef notdef
		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#endif

		case CRYPTO_AES_128_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_128;
			goto auth4common;
		case CRYPTO_AES_192_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_192;
			goto auth4common;

		case CRYPTO_AES_256_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_256;
		auth4common:
			len = cri->cri_klen / 8;
			if (len != 16 && len != 24 && len != 32) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return EINVAL;
			}

			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}
			axf->Init((*swd)->sw_ictx);
			axf->Setkey((*swd)->sw_ictx, cri->cri_key, len);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_BLAKE2B:
			axf = &auth_hash_blake2b;
			goto auth5common;
		case CRYPTO_BLAKE2S:
			axf = &auth_hash_blake2s;
		auth5common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}
			axf->Setkey((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession_locked(dev, i);
			rw_runlock(&swcr_sessions_lock);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	rw_runlock(&swcr_sessions_lock);
	return 0;
}

static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	int error;

	rw_rlock(&swcr_sessions_lock);
	error = swcr_freesession_locked(dev, tid);
	rw_runlock(&swcr_sessions_lock);
	return error;
}

/*
 * Free a session.
 */
static int
swcr_freesession_locked(device_t dev, u_int64_t tid)
{
	struct swcr_data *swd;
	struct enc_xform *txf;
	struct auth_hash *axf;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL)
		return EINVAL;

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_NULL_CBC:
		case CRYPTO_CHACHA20:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_BLAKE2B:
		case CRYPTO_BLAKE2S:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				explicit_bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_DEFLATE_COMP:
			/* Nothing to do */
			break;
		}

		free(swd, M_CRYPTO_DATA);
	}
	return 0;
}

/*
 * Process a software request.
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = CRYPTO_SESID2LID(crp->crp_sid);
	rw_rlock(&swcr_sessions_lock);
	if (swcr_sessions == NULL || lid >= swcr_sesnum || lid == 0 ||
	    swcr_sessions[lid] == NULL) {
		rw_runlock(&swcr_sessions_lock);
		crp->crp_etype = ENOENT;
		goto done;
	}
	rw_runlock(&swcr_sessions_lock);

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		rw_rlock(&swcr_sessions_lock);
		if (swcr_sessions == NULL) {
			rw_runlock(&swcr_sessions_lock);
			crp->crp_etype = ENOENT;
			goto done;
		}
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;
		rw_runlock(&swcr_sessions_lock);

		/* No such context? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_ICM:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_CHACHA20:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_BLAKE2B:
		case CRYPTO_BLAKE2S:
			if ((crp->crp_etype = swcr_authcompute(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;

		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			crp->crp_etype = swcr_authenc(crp);
			goto done;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}

static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{
	rw_init(&swcr_sessions_lock, "swcr_sessions_lock");
	memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
	memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);

	swcr_id = crypto_get_driverid(dev,
	    CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return ENOMEM;
	}
#define	REGISTER(alg) \
	crypto_register(swcr_id, alg, 0, 0)
	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA2_256_HMAC);
	REGISTER(CRYPTO_SHA2_384_HMAC);
	REGISTER(CRYPTO_SHA2_512_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_AES_XTS);
	REGISTER(CRYPTO_AES_ICM);
	REGISTER(CRYPTO_AES_NIST_GCM_16);
	REGISTER(CRYPTO_AES_NIST_GMAC);
	REGISTER(CRYPTO_AES_128_NIST_GMAC);
	REGISTER(CRYPTO_AES_192_NIST_GMAC);
	REGISTER(CRYPTO_AES_256_NIST_GMAC);
	REGISTER(CRYPTO_CAMELLIA_CBC);
	REGISTER(CRYPTO_DEFLATE_COMP);
	REGISTER(CRYPTO_BLAKE2B);
	REGISTER(CRYPTO_BLAKE2S);
	REGISTER(CRYPTO_CHACHA20);
#undef REGISTER

	return 0;
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	rw_wlock(&swcr_sessions_lock);
	free(swcr_sessions, M_CRYPTO_DATA);
	swcr_sessions = NULL;
	rw_wunlock(&swcr_sessions_lock);
	rw_destroy(&swcr_sessions_lock);
	return 0;
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);
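/*
 * Illustrative sketch (not compiled): how a kernel consumer would reach
 * this driver through the classic opencrypto session API of this era of
 * the tree -- create a session describing the transform, then dispatch a
 * request against it; completion arrives via the callback through
 * crypto_done().  The callback body and key/IV management are elided and
 * assumed to be supplied by the caller; this is a minimal sketch, not a
 * definitive consumer.
 */
#ifdef notdef
static int
example_encrypt_mbuf(struct mbuf *m, int len, u_char *key, int klen_bits,
    int (*cb)(struct cryptop *))
{
	struct cryptoini cri;
	struct cryptodesc *crd;
	struct cryptop *crp;
	u_int64_t sid;
	int error;

	bzero(&cri, sizeof(cri));
	cri.cri_alg = CRYPTO_RIJNDAEL128_CBC;	/* handled by swcr_encdec() */
	cri.cri_klen = klen_bits;		/* key length in bits */
	cri.cri_key = key;
	error = crypto_newsession(&sid, &cri, CRYPTOCAP_F_SOFTWARE);
	if (error)
		return (error);

	crp = crypto_getreq(1);			/* one descriptor */
	if (crp == NULL) {
		crypto_freesession(sid);
		return (ENOMEM);
	}
	crp->crp_sid = sid;
	crp->crp_ilen = len;
	crp->crp_flags = CRYPTO_F_IMBUF;
	crp->crp_buf = (caddr_t)m;
	crp->crp_callback = cb;

	crd = crp->crp_desc;
	crd->crd_skip = 0;
	crd->crd_len = len;
	crd->crd_inject = 0;	/* driver writes a fresh IV here on encrypt */
	crd->crd_flags = CRD_F_ENCRYPT;
	crd->crd_alg = CRYPTO_RIJNDAEL128_CBC;

	return (crypto_dispatch(crp));
}
#endif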