/*	$OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $	*/

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 * Copyright (c) 2002-2006 Sam Leffler, Errno Consulting
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by John-Mark Gurney
 * under sponsorship of the FreeBSD Foundation and
 * Rubicon Communications, LLC (Netgate).
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/endian.h>
#include <sys/limits.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/cast.h>
#include <opencrypto/skipjack.h>
#include <sys/md5.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

static	int32_t swcr_id;
static	struct swcr_data **swcr_sessions = NULL;
static	u_int32_t swcr_sesnum;
/* Protects swcr_sessions pointer, not data. */
static	struct rwlock swcr_sessions_lock;

u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];

static	int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_authenc(struct cryptop *crp);
static	int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static	int swcr_freesession(device_t dev, u_int64_t tid);
static	int swcr_freesession_locked(device_t dev, u_int64_t tid);

/*
 * Apply a symmetric encryption/decryption algorithm.
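 *
 * The transform is applied in place over the request buffer, which may
 * be flat memory, an mbuf chain, or a uio; mbuf chains are first mapped
 * onto an iovec array.  Blocks that straddle an iovec boundary are
 * bounced through a local block buffer.  For chained (CBC-style)
 * transforms the IV is tracked here; transforms that provide a reinit
 * method (such as AES-ICM) do their own IV handling.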
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, j, k, blks, ind, count, ivlen;
	struct uio *uio, uiolcl;
	struct iovec iovlcl[4];
	struct iovec *iov;
	int iovcnt, iovalloc;
	int error;

	error = 0;

	exf = sw->sw_exf;
	blks = exf->blocksize;
	ivlen = exf->ivsize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	if (crd->crd_alg == CRYPTO_AES_ICM &&
	    (crd->crd_flags & CRD_F_IV_EXPLICIT) == 0)
		return (EINVAL);

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else
			arc4rand(iv, ivlen, 0);

		/* Do we need to write the IV? */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv);
		}
	}

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		int error;

		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));

		error = exf->setkey(&sw->sw_kschedule,
		    crd->crd_key, crd->crd_klen / 8);
		if (error)
			return (error);
	}

	iov = iovlcl;
	iovcnt = nitems(iovlcl);
	iovalloc = 0;
	uio = &uiolcl;
	if ((flags & CRYPTO_F_IMBUF) != 0) {
		error = crypto_mbuftoiov((struct mbuf *)buf, &iov, &iovcnt,
		    &iovalloc);
		if (error)
			return (error);
		uio->uio_iov = iov;
		uio->uio_iovcnt = iovcnt;
	} else if ((flags & CRYPTO_F_IOV) != 0)
		uio = (struct uio *)buf;
	else {
		iov[0].iov_base = buf;
		iov[0].iov_len = crd->crd_skip + crd->crd_len;
		uio->uio_iov = iov;
		uio->uio_iovcnt = 1;
	}

	ivp = iv;

	if (exf->reinit) {
		/*
		 * xforms that provide a reinit method perform all IV
		 * handling themselves.
		 */
		exf->reinit(sw->sw_kschedule, iv);
	}

	count = crd->crd_skip;
	ind = cuio_getptr(uio, count, &k);
	if (ind == -1) {
		error = EINVAL;
		goto out;
	}

	i = crd->crd_len;
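
	/*
	 * Walk the payload one cipher block at a time.  The copy path at
	 * the top of the loop bounces blocks that straddle an iovec
	 * boundary through "blk"; the inner while loop below it
	 * transforms runs of blocks that are contiguous within the
	 * current iovec in place.
	 */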
	while (i > 0) {
		/*
		 * If there's insufficient data at the end of
		 * an iovec, we have to do some copying.
		 */
		if (uio->uio_iov[ind].iov_len < k + blks &&
		    uio->uio_iov[ind].iov_len != k) {
			cuio_copydata(uio, count, blks, blk);

			/* Actual encryption/decryption */
			if (exf->reinit) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(sw->sw_kschedule,
					    blk);
				} else {
					exf->decrypt(sw->sw_kschedule,
					    blk);
				}
			} else if (crd->crd_flags & CRD_F_ENCRYPT) {
				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, blk);

				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				bcopy(blk, iv, blks);
				ivp = iv;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block for XOR'ing
				 * with next block
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(blk, nivp, blks);

				exf->decrypt(sw->sw_kschedule, blk);

				/* XOR with previous block */
				for (j = 0; j < blks; j++)
					blk[j] ^= ivp[j];

				ivp = nivp;
			}

			/* Copy back decrypted block */
			cuio_copyback(uio, count, blks, blk);

			count += blks;

			/* Advance pointer */
			ind = cuio_getptr(uio, count, &k);
			if (ind == -1) {
				error = EINVAL;
				goto out;
			}

			i -= blks;

			/* Could be done... */
			if (i == 0)
				break;
		}

		/*
		 * Warning: idat may point to garbage here, but
		 * we only use it in the while() loop, and only if
		 * there is indeed enough data.
		 */
		idat = (char *)uio->uio_iov[ind].iov_base + k;

		while (uio->uio_iov[ind].iov_len >= k + blks && i > 0) {
			if (exf->reinit) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					exf->encrypt(sw->sw_kschedule,
					    idat);
				} else {
					exf->decrypt(sw->sw_kschedule,
					    idat);
				}
			} else if (crd->crd_flags & CRD_F_ENCRYPT) {
				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				exf->encrypt(sw->sw_kschedule, idat);
				ivp = idat;
			} else {	/* decrypt */
				/*
				 * Keep encrypted block to be used
				 * in next block's processing.
				 */
				nivp = (ivp == iv) ? iv2 : iv;
				bcopy(idat, nivp, blks);

				exf->decrypt(sw->sw_kschedule, idat);

				/* XOR with previous block/IV */
				for (j = 0; j < blks; j++)
					idat[j] ^= ivp[j];

				ivp = nivp;
			}

			idat += blks;
			count += blks;
			k += blks;
			i -= blks;
		}

		/*
		 * Advance to the next iov if the end of the current iov
		 * is aligned with the end of a cipher block.
		 * Note that the code is equivalent to calling:
		 *	ind = cuio_getptr(uio, count, &k);
		 */
		if (i > 0 && k == uio->uio_iov[ind].iov_len) {
			k = 0;
			ind++;
			if (ind >= uio->uio_iovcnt) {
				error = EINVAL;
				goto out;
			}
		}
	}

out:
	if (iovalloc)
		free(iov, M_CRYPTO_DATA);

	return (error);
}

static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
    int klen)
{
	int k;

	klen /= 8;

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);

		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/*
		 * We need a buffer that can hold an md5 and a sha1 result
		 * just to throw it away.
		 * What we do here is the initial part of:
		 *   ALGO( key, keyfill, .. )
		 * adding the key to sw_ictx and abusing Final() to get the
		 * "keyfill" padding.
		 * In addition we abuse sw_octx to save the key, so that
		 * swcr_authcompute() can append it at the end.
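		 * The finished MAC computed there is thus
		 *   ALGO( key, keyfill, data, key, algofill )
		 * i.e. the key-prepend/key-append ("KPDK") construction.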
		 */
		u_char buf[SHA1_RESULTLEN];

		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
		axf->Setkey(sw->sw_ictx, key, klen);
		axf->Init(sw->sw_ictx);
		break;
	default:
		printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
		    "doesn't use keys.\n", __func__, axf->type);
	}
}

/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == NULL)
		return EINVAL;

	axf = sw->sw_axf;

	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		/* If we have no key saved, return error. */
		if (sw->sw_octx == NULL)
			return EINVAL;

		/*
		 * Add the trailing copy of the key (see comment in
		 * swcr_authprepare()) after the data:
		 *   ALGO( .., key, algofill )
		 * and let Final() do the proper, natural "algofill"
		 * padding.
		 */
		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_BLAKE2B:
	case CRYPTO_BLAKE2S:
	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}
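
	/*
	 * sw_mlen is the caller-requested MAC truncation length; a value
	 * of zero means "inject the full digest".
	 */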
	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}

CTASSERT(INT_MAX <= (1ll<<39) - 256);	/* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1);	/* GCM: associated data <= 2^64-1 */

/*
 * Apply a combined encryption-authentication transformation
 */
static int
swcr_authenc(struct cryptop *crp)
{
	uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
	u_char *blk = (u_char *)blkbuf;
	u_char aalg[AALG_MAX_RESULT_LEN];
	u_char uaalg[AALG_MAX_RESULT_LEN];
	u_char iv[EALG_MAX_BLOCK_LEN];
	union authctx ctx;
	struct cryptodesc *crd, *crda = NULL, *crde = NULL;
	struct swcr_data *sw, *swa, *swe = NULL;
	struct auth_hash *axf = NULL;
	struct enc_xform *exf = NULL;
	caddr_t buf = (caddr_t)crp->crp_buf;
	uint32_t *blkp;
	int aadlen, blksz, i, ivlen, len, iskip, oskip, r;

	ivlen = blksz = iskip = oskip = 0;

	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		for (sw = swcr_sessions[crp->crp_sid & 0xffffffff];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;
		if (sw == NULL)
			return (EINVAL);

		switch (sw->sw_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_NIST_GMAC:
			swe = sw;
			crde = crd;
			exf = swe->sw_exf;
			ivlen = 12;
			break;
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			swa = sw;
			crda = crd;
			axf = swa->sw_axf;
			if (swa->sw_ictx == NULL)
				return (EINVAL);
			bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
			blksz = axf->blocksize;
			break;
		default:
			return (EINVAL);
		}
	}
	if (crde == NULL || crda == NULL)
		return (EINVAL);

	if (crde->crd_alg == CRYPTO_AES_NIST_GCM_16 &&
	    (crde->crd_flags & CRD_F_IV_EXPLICIT) == 0)
		return (EINVAL);

	if (crde->crd_klen != crda->crd_klen)
		return (EINVAL);
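
	/*
	 * GCM/GMAC requests arrive as a pair of descriptors: one for the
	 * cipher pass and one carrying the AAD and the hash key.  Both
	 * must be present and keyed identically, and the nonce is the
	 * fixed 96-bit (12-byte) GCM IV set above.
	 */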
	/* Initialize the IV */
	if (crde->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else
			arc4rand(iv, ivlen, 0);

		/* Do we need to write the IV? */
		if (!(crde->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(crp->crp_flags, buf, crde->crd_inject,
			    ivlen, iv);

	} else {	/* Decryption */
		/* IV explicitly provided? */
		if (crde->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crde->crd_iv, iv, ivlen);
		else {
			/* Get IV off buf */
			crypto_copydata(crp->crp_flags, buf, crde->crd_inject,
			    ivlen, iv);
		}
	}

	/* Supply MAC with IV */
	if (axf->Reinit)
		axf->Reinit(&ctx, iv, ivlen);

	/* Supply MAC with AAD */
	aadlen = crda->crd_len;

	for (i = iskip; i < crda->crd_len; i += blksz) {
		len = MIN(crda->crd_len - i, blksz - oskip);
		crypto_copydata(crp->crp_flags, buf, crda->crd_skip + i, len,
		    blk + oskip);
		bzero(blk + len + oskip, blksz - len - oskip);
		axf->Update(&ctx, blk, blksz);
		oskip = 0; /* reset initial output offset */
	}

	if (exf->reinit)
		exf->reinit(swe->sw_kschedule, iv);

	/* Do encryption/decryption with MAC */
	for (i = 0; i < crde->crd_len; i += blksz) {
		len = MIN(crde->crd_len - i, blksz);
		if (len < blksz)
			bzero(blk, blksz);
		crypto_copydata(crp->crp_flags, buf, crde->crd_skip + i, len,
		    blk);
		if (crde->crd_flags & CRD_F_ENCRYPT) {
			exf->encrypt(swe->sw_kschedule, blk);
			axf->Update(&ctx, blk, len);
			crypto_copyback(crp->crp_flags, buf,
			    crde->crd_skip + i, len, blk);
		} else {
			axf->Update(&ctx, blk, len);
		}
	}
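
	/*
	 * GCM closes the GHASH computation with one final block holding
	 * the big-endian bit lengths: len(AAD) in the first 64 bits and
	 * len(ciphertext) in the second.  Since crd_len is an int (see
	 * the CTASSERTs above), only the low 32 bits of each length can
	 * be non-zero, which is why words 1 and 3 of the block are
	 * written below.
	 */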
	/* Do any required special finalization */
	switch (crda->crd_alg) {
	case CRYPTO_AES_128_NIST_GMAC:
	case CRYPTO_AES_192_NIST_GMAC:
	case CRYPTO_AES_256_NIST_GMAC:
		/* length block */
		bzero(blk, blksz);
		blkp = (uint32_t *)blk + 1;
		*blkp = htobe32(aadlen * 8);
		blkp = (uint32_t *)blk + 3;
		*blkp = htobe32(crde->crd_len * 8);
		axf->Update(&ctx, blk, blksz);
		break;
	}

	/* Finalize MAC */
	axf->Final(aalg, &ctx);

	/* Validate tag */
	if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
		crypto_copydata(crp->crp_flags, buf, crda->crd_inject,
		    axf->hashsize, uaalg);

		r = timingsafe_bcmp(aalg, uaalg, axf->hashsize);
		if (r == 0) {
			/* tag matches, decrypt data */
			for (i = 0; i < crde->crd_len; i += blksz) {
				len = MIN(crde->crd_len - i, blksz);
				if (len < blksz)
					bzero(blk, blksz);
				crypto_copydata(crp->crp_flags, buf,
				    crde->crd_skip + i, len, blk);
				exf->decrypt(swe->sw_kschedule, blk);
				crypto_copyback(crp->crp_flags, buf,
				    crde->crd_skip + i, len, blk);
			}
		} else
			return (EBADMSG);
	} else {
		/* Inject the authentication data */
		crypto_copyback(crp->crp_flags, buf, crda->crd_inject,
		    axf->hashsize, aalg);
	}

	return (0);
}

/*
 * Apply a compression/decompression algorithm
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
    caddr_t buf, int flags)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * We must process the whole buffer in one pass, so if the data
	 * is not contiguous in the mbuf chain we copy it into a
	 * temporary buffer first.
	 */
	data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	free(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/*
	 * Copy back the (de)compressed data.  m_copyback extends the
	 * mbuf as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result >= crd->crd_len) {
			/* Compression was useless, we lost time */
			free(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	crypto_copyback(flags, buf, crd->crd_skip, result, out);
	if (result < crd->crd_len) {
		adj = result - crd->crd_len;
		if (flags & CRYPTO_F_IMBUF)
			m_adj((struct mbuf *)buf, adj);
		else if (flags & CRYPTO_F_IOV) {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	free(out, M_CRYPTO_DATA);
	return 0;
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int len;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	rw_wlock(&swcr_sessions_lock);
	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			rw_wunlock(&swcr_sessions_lock);
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions != NULL) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}
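
	/*
	 * The table is now large enough; downgrade to a read lock.  The
	 * rwlock only guards the swcr_sessions pointer and table growth,
	 * and a concurrent swcr_newsession() would need the write lock,
	 * so slot i remains ours while we hold the read lock.
	 */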
	rw_downgrade(&swcr_sessions_lock);
	swd = &swcr_sessions[i];
	*sid = i;

	while (cri) {
		*swd = malloc(sizeof(struct swcr_data),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (*swd == NULL) {
			swcr_freesession_locked(dev, i);
			rw_runlock(&swcr_sessions_lock);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_AES_XTS:
			txf = &enc_xform_aes_xts;
			goto enccommon;
		case CRYPTO_AES_ICM:
			txf = &enc_xform_aes_icm;
			goto enccommon;
		case CRYPTO_AES_NIST_GCM_16:
			txf = &enc_xform_aes_nist_gcm;
			goto enccommon;
		case CRYPTO_AES_NIST_GMAC:
			txf = &enc_xform_aes_nist_gmac;
			(*swd)->sw_exf = txf;
			break;
		case CRYPTO_CAMELLIA_CBC:
			txf = &enc_xform_camellia;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			if (cri->cri_key != NULL) {
				error = txf->setkey(&((*swd)->sw_kschedule),
				    cri->cri_key, cri->cri_klen / 8);
				if (error) {
					swcr_freesession_locked(dev, i);
					rw_runlock(&swcr_sessions_lock);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(cri->cri_klen / 8,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#ifdef notdef
		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#endif

		case CRYPTO_AES_128_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_128;
			goto auth4common;

		case CRYPTO_AES_192_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_192;
			goto auth4common;

		case CRYPTO_AES_256_NIST_GMAC:
			axf = &auth_hash_nist_gmac_aes_256;
		auth4common:
			len = cri->cri_klen / 8;
			if (len != 16 && len != 24 && len != 32) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return EINVAL;
			}

			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}
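			/*
			 * Program the AES key (16/24/32 bytes, checked
			 * above) into the fresh GMAC context; the GMAC
			 * code derives its GHASH subkey from this key.
			 */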
			axf->Init((*swd)->sw_ictx);
			axf->Setkey((*swd)->sw_ictx, cri->cri_key, len);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_BLAKE2B:
			axf = &auth_hash_blake2b;
			goto auth5common;
		case CRYPTO_BLAKE2S:
			axf = &auth_hash_blake2s;
		auth5common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession_locked(dev, i);
				rw_runlock(&swcr_sessions_lock);
				return ENOBUFS;
			}
			axf->Setkey((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession_locked(dev, i);
			rw_runlock(&swcr_sessions_lock);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	rw_runlock(&swcr_sessions_lock);
	return 0;
}

static int
swcr_freesession(device_t dev, u_int64_t tid)
{
	int error;

	rw_rlock(&swcr_sessions_lock);
	error = swcr_freesession_locked(dev, tid);
	rw_runlock(&swcr_sessions_lock);
	return error;
}

/*
 * Free a session.
 */
static int
swcr_freesession_locked(device_t dev, u_int64_t tid)
{
	struct swcr_data *swd;
	struct enc_xform *txf;
	struct auth_hash *axf;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL)
		return EINVAL;

	/* Silently accept and return */
	if (sid == 0)
		return 0;

	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_ICM:
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_CAMELLIA_CBC:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_BLAKE2B:
		case CRYPTO_BLAKE2S:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				explicit_bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_DEFLATE_COMP:
			/* Nothing to do */
			break;
		}

		free(swd, M_CRYPTO_DATA);
	}
	return 0;
}

/*
 * Process a software request.
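 *
 * Walk the descriptor chain, look up the per-algorithm context in the
 * session's swcr_data list, and dispatch each descriptor to the cipher,
 * hash, AEAD, or compression handler; AEAD (GCM/GMAC) descriptors are
 * handed to swcr_authenc() as a complete request.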
 */
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = CRYPTO_SESID2LID(crp->crp_sid);
	rw_rlock(&swcr_sessions_lock);
	if (swcr_sessions == NULL || lid >= swcr_sesnum || lid == 0 ||
	    swcr_sessions[lid] == NULL) {
		rw_runlock(&swcr_sessions_lock);
		crp->crp_etype = ENOENT;
		goto done;
	}
	rw_runlock(&swcr_sessions_lock);

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		rw_rlock(&swcr_sessions_lock);
		if (swcr_sessions == NULL) {
			rw_runlock(&swcr_sessions_lock);
			crp->crp_etype = ENOENT;
			goto done;
		}
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;
		rw_runlock(&swcr_sessions_lock);

		/* No such context? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_AES_XTS:
		case CRYPTO_AES_ICM:
		case CRYPTO_CAMELLIA_CBC:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
		case CRYPTO_BLAKE2B:
		case CRYPTO_BLAKE2S:
			if ((crp->crp_etype = swcr_authcompute(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;

		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_NIST_GMAC:
		case CRYPTO_AES_128_NIST_GMAC:
		case CRYPTO_AES_192_NIST_GMAC:
		case CRYPTO_AES_256_NIST_GMAC:
			crp->crp_etype = swcr_authenc(crp);
			goto done;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}

static void
swcr_identify(driver_t *drv, device_t parent)
{
	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "cryptosoft", -1) == NULL &&
	    BUS_ADD_CHILD(parent, 10, "cryptosoft", 0) == 0)
		panic("cryptosoft: could not attach");
}
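
/*
 * NB: returning BUS_PROBE_NOWILDCARD keeps this driver from claiming
 * wildcard devices; it only matches the "cryptosoft" child added
 * explicitly by swcr_identify() above.
 */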
static int
swcr_probe(device_t dev)
{
	device_set_desc(dev, "software crypto");
	return (BUS_PROBE_NOWILDCARD);
}

static int
swcr_attach(device_t dev)
{
	rw_init(&swcr_sessions_lock, "swcr_sessions_lock");
	memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
	memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);

	swcr_id = crypto_get_driverid(dev,
	    CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
		return ENOMEM;
	}
#define	REGISTER(alg) \
	crypto_register(swcr_id, alg, 0, 0)
	REGISTER(CRYPTO_DES_CBC);
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA2_256_HMAC);
	REGISTER(CRYPTO_SHA2_384_HMAC);
	REGISTER(CRYPTO_SHA2_512_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_AES_XTS);
	REGISTER(CRYPTO_AES_ICM);
	REGISTER(CRYPTO_AES_NIST_GCM_16);
	REGISTER(CRYPTO_AES_NIST_GMAC);
	REGISTER(CRYPTO_AES_128_NIST_GMAC);
	REGISTER(CRYPTO_AES_192_NIST_GMAC);
	REGISTER(CRYPTO_AES_256_NIST_GMAC);
	REGISTER(CRYPTO_CAMELLIA_CBC);
	REGISTER(CRYPTO_DEFLATE_COMP);
	REGISTER(CRYPTO_BLAKE2B);
	REGISTER(CRYPTO_BLAKE2S);
#undef REGISTER

	return 0;
}

static int
swcr_detach(device_t dev)
{
	crypto_unregister_all(swcr_id);
	rw_wlock(&swcr_sessions_lock);
	free(swcr_sessions, M_CRYPTO_DATA);
	swcr_sessions = NULL;
	rw_wunlock(&swcr_sessions_lock);
	rw_destroy(&swcr_sessions_lock);
	return 0;
}

static device_method_t swcr_methods[] = {
	DEVMETHOD(device_identify,	swcr_identify),
	DEVMETHOD(device_probe,		swcr_probe),
	DEVMETHOD(device_attach,	swcr_attach),
	DEVMETHOD(device_detach,	swcr_detach),

	DEVMETHOD(cryptodev_newsession,	swcr_newsession),
	DEVMETHOD(cryptodev_freesession, swcr_freesession),
	DEVMETHOD(cryptodev_process,	swcr_process),

	{0, 0},
};

static driver_t swcr_driver = {
	"cryptosoft",
	swcr_methods,
	0,		/* NB: no softc */
};
static devclass_t swcr_devclass;

/*
 * NB: We explicitly reference the crypto module so we
 * get the necessary ordering when built as a loadable
 * module.  This is required because we bundle the crypto
 * module code together with the cryptosoft driver (otherwise
 * normal module dependencies would handle things).
 */
extern int crypto_modevent(struct module *, int, void *);
/* XXX where to attach */
DRIVER_MODULE(cryptosoft, nexus, swcr_driver, swcr_devclass, crypto_modevent, 0);
MODULE_VERSION(cryptosoft, 1);
MODULE_DEPEND(cryptosoft, crypto, 1, 1, 1);