/*-
 * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
 * Copyright (c) 2021 The FreeBSD Foundation
 *
 * Portions of this software were developed by Ararat River
 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Cryptographic Subsystem.
 *
 * This code is derived from the OpenBSD Cryptographic Framework (OCF)
 * that has the copyright shown below.  Very little of the original
 * code remains.
 */

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000.  Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include "opt_compat.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>

#include <ddb/ddb.h>

#include <machine/vmparam.h>
#include <vm/uma.h>

#include <crypto/intake.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform_auth.h>
#include <opencrypto/xform_enc.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
#include <machine/pcb.h>
#endif

SDT_PROVIDER_DEFINE(opencrypto);

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid().
 */
static struct mtx crypto_drivers_mtx;   /* lock on driver table */
#define CRYPTO_DRIVER_LOCK()    mtx_lock(&crypto_drivers_mtx)
#define CRYPTO_DRIVER_UNLOCK()  mtx_unlock(&crypto_drivers_mtx)
#define CRYPTO_DRIVER_ASSERT()  mtx_assert(&crypto_drivers_mtx, MA_OWNED)

/*
 * Crypto device/driver capabilities structure.
 *
 * Synchronization:
 * (d) - protected by CRYPTO_DRIVER_LOCK()
 * (q) - protected by CRYPTO_Q_LOCK()
 * Fields that are not tagged are read-only.
 */
struct cryptocap {
    device_t cc_dev;
    uint32_t cc_hid;
    uint32_t cc_sessions;       /* (d) # of sessions */

    int cc_flags;               /* (d) flags */
#define CRYPTOCAP_F_CLEANUP 0x80000000  /* needs resource cleanup */
    int cc_qblocked;            /* (q) symmetric q blocked */
    size_t cc_session_size;
    volatile int cc_refs;
};

static struct cryptocap **crypto_drivers = NULL;
static int crypto_drivers_size = 0;

struct crypto_session {
    struct cryptocap *cap;
    struct crypto_session_params csp;
    uint64_t id;
    /* Driver softc follows. */
};

static int crp_sleep = 0;
static TAILQ_HEAD(cryptop_q, cryptop) crp_q;    /* request queues */
static struct mtx crypto_q_mtx;
#define CRYPTO_Q_LOCK()     mtx_lock(&crypto_q_mtx)
#define CRYPTO_Q_UNLOCK()   mtx_unlock(&crypto_q_mtx)

SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0,
    "In-kernel cryptography");

/*
 * Taskqueue used to dispatch the crypto requests
 * that have the CRYPTO_F_ASYNC flag
 */
static struct taskqueue *crypto_tq;

/*
 * Crypto seq numbers are operated on with modular arithmetic
 */
#define CRYPTO_SEQ_GT(a,b)  ((int)((a)-(b)) > 0)

struct crypto_ret_worker {
    struct mtx crypto_ret_mtx;

    TAILQ_HEAD(,cryptop) crp_ordered_ret_q; /* ordered callback queue for symmetric jobs */
    TAILQ_HEAD(,cryptop) crp_ret_q;         /* callback queue for symmetric jobs */

    uint32_t reorder_ops;       /* total ordered sym jobs received */
    uint32_t reorder_cur_seq;   /* current sym job dispatched */

    struct thread *td;
};
static struct crypto_ret_worker *crypto_ret_workers = NULL;

#define CRYPTO_RETW(i)      (&crypto_ret_workers[i])
#define CRYPTO_RETW_ID(w)   ((w) - crypto_ret_workers)
#define FOREACH_CRYPTO_RETW(w) \
    for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w)

#define CRYPTO_RETW_LOCK(w)     mtx_lock(&w->crypto_ret_mtx)
#define CRYPTO_RETW_UNLOCK(w)   mtx_unlock(&w->crypto_ret_mtx)

static int crypto_workers_num = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN,
    &crypto_workers_num, 0,
    "Number of crypto workers used to dispatch crypto jobs");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN,
    &crypto_workers_num, 0,
    "Number of crypto workers used to dispatch crypto jobs");
#endif

static uma_zone_t cryptop_zone;

int crypto_devallowsoft = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RWTUN,
    &crypto_devallowsoft, 0,
    "Enable use of software crypto by /dev/crypto");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RWTUN,
    &crypto_devallowsoft, 0,
    "Enable/disable use of software crypto by /dev/crypto");
#endif

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

static void crypto_dispatch_thread(void *arg);
static struct thread *cryptotd;
static void crypto_ret_thread(void *arg);
static void crypto_destroy(void);
static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
static void crypto_task_invoke(void *ctx, int pending);
static void crypto_batch_enqueue(struct cryptop *crp);

static counter_u64_t cryptostats[sizeof(struct cryptostats) / sizeof(uint64_t)];
SYSCTL_COUNTER_U64_ARRAY(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW,
    cryptostats, nitems(cryptostats),
    "Crypto system statistics");

#define CRYPTOSTAT_INC(stat) do { \
    counter_u64_add( \
        cryptostats[offsetof(struct cryptostats, stat) / sizeof(uint64_t)], \
        1); \
} while (0)

static void
cryptostats_init(void *arg __unused)
{
    COUNTER_ARRAY_ALLOC(cryptostats, nitems(cryptostats), M_WAITOK);
}
SYSINIT(cryptostats_init, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_init, NULL);

static void
cryptostats_fini(void *arg __unused)
{
    COUNTER_ARRAY_FREE(cryptostats, nitems(cryptostats));
}
SYSUNINIT(cryptostats_fini, SI_SUB_COUNTER, SI_ORDER_ANY,
cryptostats_fini, 239 NULL); 240 241 /* Try to avoid directly exposing the key buffer as a symbol */ 242 static struct keybuf *keybuf; 243 244 static struct keybuf empty_keybuf = { 245 .kb_nents = 0 246 }; 247 248 /* Obtain the key buffer from boot metadata */ 249 static void 250 keybuf_init(void) 251 { 252 caddr_t kmdp; 253 254 kmdp = preload_search_by_type("elf kernel"); 255 256 if (kmdp == NULL) 257 kmdp = preload_search_by_type("elf64 kernel"); 258 259 keybuf = (struct keybuf *)preload_search_info(kmdp, 260 MODINFO_METADATA | MODINFOMD_KEYBUF); 261 262 if (keybuf == NULL) 263 keybuf = &empty_keybuf; 264 } 265 266 /* It'd be nice if we could store these in some kind of secure memory... */ 267 struct keybuf * 268 get_keybuf(void) 269 { 270 271 return (keybuf); 272 } 273 274 static struct cryptocap * 275 cap_ref(struct cryptocap *cap) 276 { 277 278 refcount_acquire(&cap->cc_refs); 279 return (cap); 280 } 281 282 static void 283 cap_rele(struct cryptocap *cap) 284 { 285 286 if (refcount_release(&cap->cc_refs) == 0) 287 return; 288 289 KASSERT(cap->cc_sessions == 0, 290 ("freeing crypto driver with active sessions")); 291 292 free(cap, M_CRYPTO_DATA); 293 } 294 295 static int 296 crypto_init(void) 297 { 298 struct crypto_ret_worker *ret_worker; 299 struct proc *p; 300 int error; 301 302 mtx_init(&crypto_drivers_mtx, "crypto driver table", NULL, MTX_DEF); 303 304 TAILQ_INIT(&crp_q); 305 mtx_init(&crypto_q_mtx, "crypto op queues", NULL, MTX_DEF); 306 307 cryptop_zone = uma_zcreate("cryptop", 308 sizeof(struct cryptop), NULL, NULL, NULL, NULL, 309 UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 310 311 crypto_drivers_size = CRYPTO_DRIVERS_INITIAL; 312 crypto_drivers = malloc(crypto_drivers_size * 313 sizeof(struct cryptocap), M_CRYPTO_DATA, M_WAITOK | M_ZERO); 314 315 if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus) 316 crypto_workers_num = mp_ncpus; 317 318 crypto_tq = taskqueue_create("crypto", M_WAITOK | M_ZERO, 319 taskqueue_thread_enqueue, &crypto_tq); 320 321 taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN, 322 "crypto"); 323 324 p = NULL; 325 error = kproc_kthread_add(crypto_dispatch_thread, NULL, &p, &cryptotd, 326 0, 0, "crypto", "crypto"); 327 if (error) { 328 printf("crypto_init: cannot start crypto thread; error %d", 329 error); 330 goto bad; 331 } 332 333 crypto_ret_workers = mallocarray(crypto_workers_num, 334 sizeof(struct crypto_ret_worker), M_CRYPTO_DATA, M_WAITOK | M_ZERO); 335 336 FOREACH_CRYPTO_RETW(ret_worker) { 337 TAILQ_INIT(&ret_worker->crp_ordered_ret_q); 338 TAILQ_INIT(&ret_worker->crp_ret_q); 339 340 ret_worker->reorder_ops = 0; 341 ret_worker->reorder_cur_seq = 0; 342 343 mtx_init(&ret_worker->crypto_ret_mtx, "crypto return queues", 344 NULL, MTX_DEF); 345 346 error = kthread_add(crypto_ret_thread, ret_worker, p, 347 &ret_worker->td, 0, 0, "crypto returns %td", 348 CRYPTO_RETW_ID(ret_worker)); 349 if (error) { 350 printf("crypto_init: cannot start cryptoret thread; error %d", 351 error); 352 goto bad; 353 } 354 } 355 356 keybuf_init(); 357 358 return 0; 359 bad: 360 crypto_destroy(); 361 return error; 362 } 363 364 /* 365 * Signal a crypto thread to terminate. We use the driver 366 * table lock to synchronize the sleep/wakeups so that we 367 * are sure the threads have terminated before we release 368 * the data structures they use. See crypto_finis below 369 * for the other half of this song-and-dance. 
370 */ 371 static void 372 crypto_terminate(struct thread **tdp, void *q) 373 { 374 struct thread *td; 375 376 mtx_assert(&crypto_drivers_mtx, MA_OWNED); 377 td = *tdp; 378 *tdp = NULL; 379 if (td != NULL) { 380 wakeup_one(q); 381 mtx_sleep(td, &crypto_drivers_mtx, PWAIT, "crypto_destroy", 0); 382 } 383 } 384 385 static void 386 hmac_init_pad(const struct auth_hash *axf, const char *key, int klen, 387 void *auth_ctx, uint8_t padval) 388 { 389 uint8_t hmac_key[HMAC_MAX_BLOCK_LEN]; 390 u_int i; 391 392 KASSERT(axf->blocksize <= sizeof(hmac_key), 393 ("Invalid HMAC block size %d", axf->blocksize)); 394 395 /* 396 * If the key is larger than the block size, use the digest of 397 * the key as the key instead. 398 */ 399 memset(hmac_key, 0, sizeof(hmac_key)); 400 if (klen > axf->blocksize) { 401 axf->Init(auth_ctx); 402 axf->Update(auth_ctx, key, klen); 403 axf->Final(hmac_key, auth_ctx); 404 klen = axf->hashsize; 405 } else 406 memcpy(hmac_key, key, klen); 407 408 for (i = 0; i < axf->blocksize; i++) 409 hmac_key[i] ^= padval; 410 411 axf->Init(auth_ctx); 412 axf->Update(auth_ctx, hmac_key, axf->blocksize); 413 explicit_bzero(hmac_key, sizeof(hmac_key)); 414 } 415 416 void 417 hmac_init_ipad(const struct auth_hash *axf, const char *key, int klen, 418 void *auth_ctx) 419 { 420 421 hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL); 422 } 423 424 void 425 hmac_init_opad(const struct auth_hash *axf, const char *key, int klen, 426 void *auth_ctx) 427 { 428 429 hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL); 430 } 431 432 static void 433 crypto_destroy(void) 434 { 435 struct crypto_ret_worker *ret_worker; 436 int i; 437 438 /* 439 * Terminate any crypto threads. 440 */ 441 if (crypto_tq != NULL) 442 taskqueue_drain_all(crypto_tq); 443 CRYPTO_DRIVER_LOCK(); 444 crypto_terminate(&cryptotd, &crp_q); 445 FOREACH_CRYPTO_RETW(ret_worker) 446 crypto_terminate(&ret_worker->td, &ret_worker->crp_ret_q); 447 CRYPTO_DRIVER_UNLOCK(); 448 449 /* XXX flush queues??? */ 450 451 /* 452 * Reclaim dynamically allocated resources. 
453 */ 454 for (i = 0; i < crypto_drivers_size; i++) { 455 if (crypto_drivers[i] != NULL) 456 cap_rele(crypto_drivers[i]); 457 } 458 free(crypto_drivers, M_CRYPTO_DATA); 459 460 if (cryptop_zone != NULL) 461 uma_zdestroy(cryptop_zone); 462 mtx_destroy(&crypto_q_mtx); 463 FOREACH_CRYPTO_RETW(ret_worker) 464 mtx_destroy(&ret_worker->crypto_ret_mtx); 465 free(crypto_ret_workers, M_CRYPTO_DATA); 466 if (crypto_tq != NULL) 467 taskqueue_free(crypto_tq); 468 mtx_destroy(&crypto_drivers_mtx); 469 } 470 471 uint32_t 472 crypto_ses2hid(crypto_session_t crypto_session) 473 { 474 return (crypto_session->cap->cc_hid); 475 } 476 477 uint32_t 478 crypto_ses2caps(crypto_session_t crypto_session) 479 { 480 return (crypto_session->cap->cc_flags & 0xff000000); 481 } 482 483 void * 484 crypto_get_driver_session(crypto_session_t crypto_session) 485 { 486 return (crypto_session + 1); 487 } 488 489 const struct crypto_session_params * 490 crypto_get_params(crypto_session_t crypto_session) 491 { 492 return (&crypto_session->csp); 493 } 494 495 const struct auth_hash * 496 crypto_auth_hash(const struct crypto_session_params *csp) 497 { 498 499 switch (csp->csp_auth_alg) { 500 case CRYPTO_SHA1_HMAC: 501 return (&auth_hash_hmac_sha1); 502 case CRYPTO_SHA2_224_HMAC: 503 return (&auth_hash_hmac_sha2_224); 504 case CRYPTO_SHA2_256_HMAC: 505 return (&auth_hash_hmac_sha2_256); 506 case CRYPTO_SHA2_384_HMAC: 507 return (&auth_hash_hmac_sha2_384); 508 case CRYPTO_SHA2_512_HMAC: 509 return (&auth_hash_hmac_sha2_512); 510 case CRYPTO_NULL_HMAC: 511 return (&auth_hash_null); 512 case CRYPTO_RIPEMD160_HMAC: 513 return (&auth_hash_hmac_ripemd_160); 514 case CRYPTO_SHA1: 515 return (&auth_hash_sha1); 516 case CRYPTO_SHA2_224: 517 return (&auth_hash_sha2_224); 518 case CRYPTO_SHA2_256: 519 return (&auth_hash_sha2_256); 520 case CRYPTO_SHA2_384: 521 return (&auth_hash_sha2_384); 522 case CRYPTO_SHA2_512: 523 return (&auth_hash_sha2_512); 524 case CRYPTO_AES_NIST_GMAC: 525 switch (csp->csp_auth_klen) { 526 case 128 / 8: 527 return (&auth_hash_nist_gmac_aes_128); 528 case 192 / 8: 529 return (&auth_hash_nist_gmac_aes_192); 530 case 256 / 8: 531 return (&auth_hash_nist_gmac_aes_256); 532 default: 533 return (NULL); 534 } 535 case CRYPTO_BLAKE2B: 536 return (&auth_hash_blake2b); 537 case CRYPTO_BLAKE2S: 538 return (&auth_hash_blake2s); 539 case CRYPTO_POLY1305: 540 return (&auth_hash_poly1305); 541 case CRYPTO_AES_CCM_CBC_MAC: 542 switch (csp->csp_auth_klen) { 543 case 128 / 8: 544 return (&auth_hash_ccm_cbc_mac_128); 545 case 192 / 8: 546 return (&auth_hash_ccm_cbc_mac_192); 547 case 256 / 8: 548 return (&auth_hash_ccm_cbc_mac_256); 549 default: 550 return (NULL); 551 } 552 default: 553 return (NULL); 554 } 555 } 556 557 const struct enc_xform * 558 crypto_cipher(const struct crypto_session_params *csp) 559 { 560 561 switch (csp->csp_cipher_alg) { 562 case CRYPTO_AES_CBC: 563 return (&enc_xform_aes_cbc); 564 case CRYPTO_AES_XTS: 565 return (&enc_xform_aes_xts); 566 case CRYPTO_AES_ICM: 567 return (&enc_xform_aes_icm); 568 case CRYPTO_AES_NIST_GCM_16: 569 return (&enc_xform_aes_nist_gcm); 570 case CRYPTO_CAMELLIA_CBC: 571 return (&enc_xform_camellia); 572 case CRYPTO_NULL_CBC: 573 return (&enc_xform_null); 574 case CRYPTO_CHACHA20: 575 return (&enc_xform_chacha20); 576 case CRYPTO_AES_CCM_16: 577 return (&enc_xform_ccm); 578 case CRYPTO_CHACHA20_POLY1305: 579 return (&enc_xform_chacha20_poly1305); 580 default: 581 return (NULL); 582 } 583 } 584 585 static struct cryptocap * 586 crypto_checkdriver(uint32_t hid) 587 { 588 589 return 
(hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]); 590 } 591 592 /* 593 * Select a driver for a new session that supports the specified 594 * algorithms and, optionally, is constrained according to the flags. 595 */ 596 static struct cryptocap * 597 crypto_select_driver(const struct crypto_session_params *csp, int flags) 598 { 599 struct cryptocap *cap, *best; 600 int best_match, error, hid; 601 602 CRYPTO_DRIVER_ASSERT(); 603 604 best = NULL; 605 for (hid = 0; hid < crypto_drivers_size; hid++) { 606 /* 607 * If there is no driver for this slot, or the driver 608 * is not appropriate (hardware or software based on 609 * match), then skip. 610 */ 611 cap = crypto_drivers[hid]; 612 if (cap == NULL || 613 (cap->cc_flags & flags) == 0) 614 continue; 615 616 error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp); 617 if (error >= 0) 618 continue; 619 620 /* 621 * Use the driver with the highest probe value. 622 * Hardware drivers use a higher probe value than 623 * software. In case of a tie, prefer the driver with 624 * the fewest active sessions. 625 */ 626 if (best == NULL || error > best_match || 627 (error == best_match && 628 cap->cc_sessions < best->cc_sessions)) { 629 best = cap; 630 best_match = error; 631 } 632 } 633 return best; 634 } 635 636 static enum alg_type { 637 ALG_NONE = 0, 638 ALG_CIPHER, 639 ALG_DIGEST, 640 ALG_KEYED_DIGEST, 641 ALG_COMPRESSION, 642 ALG_AEAD 643 } alg_types[] = { 644 [CRYPTO_SHA1_HMAC] = ALG_KEYED_DIGEST, 645 [CRYPTO_RIPEMD160_HMAC] = ALG_KEYED_DIGEST, 646 [CRYPTO_AES_CBC] = ALG_CIPHER, 647 [CRYPTO_SHA1] = ALG_DIGEST, 648 [CRYPTO_NULL_HMAC] = ALG_DIGEST, 649 [CRYPTO_NULL_CBC] = ALG_CIPHER, 650 [CRYPTO_DEFLATE_COMP] = ALG_COMPRESSION, 651 [CRYPTO_SHA2_256_HMAC] = ALG_KEYED_DIGEST, 652 [CRYPTO_SHA2_384_HMAC] = ALG_KEYED_DIGEST, 653 [CRYPTO_SHA2_512_HMAC] = ALG_KEYED_DIGEST, 654 [CRYPTO_CAMELLIA_CBC] = ALG_CIPHER, 655 [CRYPTO_AES_XTS] = ALG_CIPHER, 656 [CRYPTO_AES_ICM] = ALG_CIPHER, 657 [CRYPTO_AES_NIST_GMAC] = ALG_KEYED_DIGEST, 658 [CRYPTO_AES_NIST_GCM_16] = ALG_AEAD, 659 [CRYPTO_BLAKE2B] = ALG_KEYED_DIGEST, 660 [CRYPTO_BLAKE2S] = ALG_KEYED_DIGEST, 661 [CRYPTO_CHACHA20] = ALG_CIPHER, 662 [CRYPTO_SHA2_224_HMAC] = ALG_KEYED_DIGEST, 663 [CRYPTO_RIPEMD160] = ALG_DIGEST, 664 [CRYPTO_SHA2_224] = ALG_DIGEST, 665 [CRYPTO_SHA2_256] = ALG_DIGEST, 666 [CRYPTO_SHA2_384] = ALG_DIGEST, 667 [CRYPTO_SHA2_512] = ALG_DIGEST, 668 [CRYPTO_POLY1305] = ALG_KEYED_DIGEST, 669 [CRYPTO_AES_CCM_CBC_MAC] = ALG_KEYED_DIGEST, 670 [CRYPTO_AES_CCM_16] = ALG_AEAD, 671 [CRYPTO_CHACHA20_POLY1305] = ALG_AEAD, 672 }; 673 674 static enum alg_type 675 alg_type(int alg) 676 { 677 678 if (alg < nitems(alg_types)) 679 return (alg_types[alg]); 680 return (ALG_NONE); 681 } 682 683 static bool 684 alg_is_compression(int alg) 685 { 686 687 return (alg_type(alg) == ALG_COMPRESSION); 688 } 689 690 static bool 691 alg_is_cipher(int alg) 692 { 693 694 return (alg_type(alg) == ALG_CIPHER); 695 } 696 697 static bool 698 alg_is_digest(int alg) 699 { 700 701 return (alg_type(alg) == ALG_DIGEST || 702 alg_type(alg) == ALG_KEYED_DIGEST); 703 } 704 705 static bool 706 alg_is_keyed_digest(int alg) 707 { 708 709 return (alg_type(alg) == ALG_KEYED_DIGEST); 710 } 711 712 static bool 713 alg_is_aead(int alg) 714 { 715 716 return (alg_type(alg) == ALG_AEAD); 717 } 718 719 static bool 720 ccm_tag_length_valid(int len) 721 { 722 /* RFC 3610 */ 723 switch (len) { 724 case 4: 725 case 6: 726 case 8: 727 case 10: 728 case 12: 729 case 14: 730 case 16: 731 return (true); 732 default: 733 return (false); 734 } 735 } 736 737 
#define SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN) 738 739 /* Various sanity checks on crypto session parameters. */ 740 static bool 741 check_csp(const struct crypto_session_params *csp) 742 { 743 const struct auth_hash *axf; 744 745 /* Mode-independent checks. */ 746 if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0) 747 return (false); 748 if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 || 749 csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0) 750 return (false); 751 if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0) 752 return (false); 753 if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0) 754 return (false); 755 756 switch (csp->csp_mode) { 757 case CSP_MODE_COMPRESS: 758 if (!alg_is_compression(csp->csp_cipher_alg)) 759 return (false); 760 if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) 761 return (false); 762 if (csp->csp_flags & CSP_F_SEPARATE_AAD) 763 return (false); 764 if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 || 765 csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || 766 csp->csp_auth_mlen != 0) 767 return (false); 768 break; 769 case CSP_MODE_CIPHER: 770 if (!alg_is_cipher(csp->csp_cipher_alg)) 771 return (false); 772 if (csp->csp_flags & CSP_F_SEPARATE_AAD) 773 return (false); 774 if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) { 775 if (csp->csp_cipher_klen == 0) 776 return (false); 777 if (csp->csp_ivlen == 0) 778 return (false); 779 } 780 if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) 781 return (false); 782 if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || 783 csp->csp_auth_mlen != 0) 784 return (false); 785 break; 786 case CSP_MODE_DIGEST: 787 if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0) 788 return (false); 789 790 if (csp->csp_flags & CSP_F_SEPARATE_AAD) 791 return (false); 792 793 /* IV is optional for digests (e.g. GMAC). */ 794 switch (csp->csp_auth_alg) { 795 case CRYPTO_AES_CCM_CBC_MAC: 796 if (csp->csp_ivlen < 7 || csp->csp_ivlen > 13) 797 return (false); 798 break; 799 case CRYPTO_AES_NIST_GMAC: 800 if (csp->csp_ivlen != AES_GCM_IV_LEN) 801 return (false); 802 break; 803 default: 804 if (csp->csp_ivlen != 0) 805 return (false); 806 break; 807 } 808 809 if (!alg_is_digest(csp->csp_auth_alg)) 810 return (false); 811 812 /* Key is optional for BLAKE2 digests. 
*/ 813 if (csp->csp_auth_alg == CRYPTO_BLAKE2B || 814 csp->csp_auth_alg == CRYPTO_BLAKE2S) 815 ; 816 else if (alg_is_keyed_digest(csp->csp_auth_alg)) { 817 if (csp->csp_auth_klen == 0) 818 return (false); 819 } else { 820 if (csp->csp_auth_klen != 0) 821 return (false); 822 } 823 if (csp->csp_auth_mlen != 0) { 824 axf = crypto_auth_hash(csp); 825 if (axf == NULL || csp->csp_auth_mlen > axf->hashsize) 826 return (false); 827 828 if (csp->csp_auth_alg == CRYPTO_AES_CCM_CBC_MAC && 829 !ccm_tag_length_valid(csp->csp_auth_mlen)) 830 return (false); 831 } 832 break; 833 case CSP_MODE_AEAD: 834 if (!alg_is_aead(csp->csp_cipher_alg)) 835 return (false); 836 if (csp->csp_cipher_klen == 0) 837 return (false); 838 if (csp->csp_ivlen == 0 || 839 csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) 840 return (false); 841 if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0) 842 return (false); 843 844 switch (csp->csp_cipher_alg) { 845 case CRYPTO_AES_CCM_16: 846 if (csp->csp_auth_mlen != 0 && 847 !ccm_tag_length_valid(csp->csp_auth_mlen)) 848 return (false); 849 850 if (csp->csp_ivlen < 7 || csp->csp_ivlen > 13) 851 return (false); 852 break; 853 case CRYPTO_AES_NIST_GCM_16: 854 if (csp->csp_auth_mlen > AES_GMAC_HASH_LEN) 855 return (false); 856 857 if (csp->csp_ivlen != AES_GCM_IV_LEN) 858 return (false); 859 break; 860 case CRYPTO_CHACHA20_POLY1305: 861 if (csp->csp_ivlen != 8 && csp->csp_ivlen != 12) 862 return (false); 863 if (csp->csp_auth_mlen > POLY1305_HASH_LEN) 864 return (false); 865 break; 866 } 867 break; 868 case CSP_MODE_ETA: 869 if (!alg_is_cipher(csp->csp_cipher_alg)) 870 return (false); 871 if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) { 872 if (csp->csp_cipher_klen == 0) 873 return (false); 874 if (csp->csp_ivlen == 0) 875 return (false); 876 } 877 if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) 878 return (false); 879 if (!alg_is_digest(csp->csp_auth_alg)) 880 return (false); 881 882 /* Key is optional for BLAKE2 digests. */ 883 if (csp->csp_auth_alg == CRYPTO_BLAKE2B || 884 csp->csp_auth_alg == CRYPTO_BLAKE2S) 885 ; 886 else if (alg_is_keyed_digest(csp->csp_auth_alg)) { 887 if (csp->csp_auth_klen == 0) 888 return (false); 889 } else { 890 if (csp->csp_auth_klen != 0) 891 return (false); 892 } 893 if (csp->csp_auth_mlen != 0) { 894 axf = crypto_auth_hash(csp); 895 if (axf == NULL || csp->csp_auth_mlen > axf->hashsize) 896 return (false); 897 } 898 break; 899 default: 900 return (false); 901 } 902 903 return (true); 904 } 905 906 /* 907 * Delete a session after it has been detached from its driver. 908 */ 909 static void 910 crypto_deletesession(crypto_session_t cses) 911 { 912 struct cryptocap *cap; 913 914 cap = cses->cap; 915 916 zfree(cses, M_CRYPTO_DATA); 917 918 CRYPTO_DRIVER_LOCK(); 919 cap->cc_sessions--; 920 if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP) 921 wakeup(cap); 922 CRYPTO_DRIVER_UNLOCK(); 923 cap_rele(cap); 924 } 925 926 /* 927 * Create a new session. The crid argument specifies a crypto 928 * driver to use or constraints on a driver to select (hardware 929 * only, software only, either). Whatever driver is selected 930 * must be capable of the requested crypto algorithms. 
931 */ 932 int 933 crypto_newsession(crypto_session_t *cses, 934 const struct crypto_session_params *csp, int crid) 935 { 936 static uint64_t sessid = 0; 937 crypto_session_t res; 938 struct cryptocap *cap; 939 int err; 940 941 if (!check_csp(csp)) 942 return (EINVAL); 943 944 res = NULL; 945 946 CRYPTO_DRIVER_LOCK(); 947 if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { 948 /* 949 * Use specified driver; verify it is capable. 950 */ 951 cap = crypto_checkdriver(crid); 952 if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0) 953 cap = NULL; 954 } else { 955 /* 956 * No requested driver; select based on crid flags. 957 */ 958 cap = crypto_select_driver(csp, crid); 959 } 960 if (cap == NULL) { 961 CRYPTO_DRIVER_UNLOCK(); 962 CRYPTDEB("no driver"); 963 return (EOPNOTSUPP); 964 } 965 cap_ref(cap); 966 cap->cc_sessions++; 967 CRYPTO_DRIVER_UNLOCK(); 968 969 /* Allocate a single block for the generic session and driver softc. */ 970 res = malloc(sizeof(*res) + cap->cc_session_size, M_CRYPTO_DATA, 971 M_WAITOK | M_ZERO); 972 res->cap = cap; 973 res->csp = *csp; 974 res->id = atomic_fetchadd_64(&sessid, 1); 975 976 /* Call the driver initialization routine. */ 977 err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp); 978 if (err != 0) { 979 CRYPTDEB("dev newsession failed: %d", err); 980 crypto_deletesession(res); 981 return (err); 982 } 983 984 *cses = res; 985 return (0); 986 } 987 988 /* 989 * Delete an existing session (or a reserved session on an unregistered 990 * driver). 991 */ 992 void 993 crypto_freesession(crypto_session_t cses) 994 { 995 struct cryptocap *cap; 996 997 if (cses == NULL) 998 return; 999 1000 cap = cses->cap; 1001 1002 /* Call the driver cleanup routine, if available. */ 1003 CRYPTODEV_FREESESSION(cap->cc_dev, cses); 1004 1005 crypto_deletesession(cses); 1006 } 1007 1008 /* 1009 * Return a new driver id. Registers a driver with the system so that 1010 * it can be probed by subsequent sessions. 1011 */ 1012 int32_t 1013 crypto_get_driverid(device_t dev, size_t sessionsize, int flags) 1014 { 1015 struct cryptocap *cap, **newdrv; 1016 int i; 1017 1018 if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { 1019 device_printf(dev, 1020 "no flags specified when registering driver\n"); 1021 return -1; 1022 } 1023 1024 cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO); 1025 cap->cc_dev = dev; 1026 cap->cc_session_size = sessionsize; 1027 cap->cc_flags = flags; 1028 refcount_init(&cap->cc_refs, 1); 1029 1030 CRYPTO_DRIVER_LOCK(); 1031 for (;;) { 1032 for (i = 0; i < crypto_drivers_size; i++) { 1033 if (crypto_drivers[i] == NULL) 1034 break; 1035 } 1036 1037 if (i < crypto_drivers_size) 1038 break; 1039 1040 /* Out of entries, allocate some more. 
*/

        if (2 * crypto_drivers_size <= crypto_drivers_size) {
            CRYPTO_DRIVER_UNLOCK();
            printf("crypto: driver count wraparound!\n");
            cap_rele(cap);
            return (-1);
        }
        CRYPTO_DRIVER_UNLOCK();

        newdrv = malloc(2 * crypto_drivers_size *
            sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

        CRYPTO_DRIVER_LOCK();
        memcpy(newdrv, crypto_drivers,
            crypto_drivers_size * sizeof(*crypto_drivers));

        crypto_drivers_size *= 2;

        free(crypto_drivers, M_CRYPTO_DATA);
        crypto_drivers = newdrv;
    }

    cap->cc_hid = i;
    crypto_drivers[i] = cap;
    CRYPTO_DRIVER_UNLOCK();

    if (bootverbose)
        printf("crypto: assign %s driver id %u, flags 0x%x\n",
            device_get_nameunit(dev), i, flags);

    return i;
}

/*
 * Lookup a driver by name.  We match against the full device
 * name and unit, and against just the name.  The latter gives
 * us simple wildcarding by device name.  On success return the
 * driver/hardware identifier; otherwise return -1.
 */
int
crypto_find_driver(const char *match)
{
    struct cryptocap *cap;
    int i, len = strlen(match);

    CRYPTO_DRIVER_LOCK();
    for (i = 0; i < crypto_drivers_size; i++) {
        if (crypto_drivers[i] == NULL)
            continue;
        cap = crypto_drivers[i];
        if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 ||
            strncmp(match, device_get_name(cap->cc_dev), len) == 0) {
            CRYPTO_DRIVER_UNLOCK();
            return (i);
        }
    }
    CRYPTO_DRIVER_UNLOCK();
    return (-1);
}

/*
 * Return the device_t for the specified driver or NULL
 * if the driver identifier is invalid.
 */
device_t
crypto_find_device_byhid(int hid)
{
    struct cryptocap *cap;
    device_t dev;

    dev = NULL;
    CRYPTO_DRIVER_LOCK();
    cap = crypto_checkdriver(hid);
    if (cap != NULL)
        dev = cap->cc_dev;
    CRYPTO_DRIVER_UNLOCK();
    return (dev);
}

/*
 * Return the device/driver capabilities.
 */
int
crypto_getcaps(int hid)
{
    struct cryptocap *cap;
    int flags;

    flags = 0;
    CRYPTO_DRIVER_LOCK();
    cap = crypto_checkdriver(hid);
    if (cap != NULL)
        flags = cap->cc_flags;
    CRYPTO_DRIVER_UNLOCK();
    return (flags);
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(uint32_t driverid)
{
    struct cryptocap *cap;

    CRYPTO_DRIVER_LOCK();
    cap = crypto_checkdriver(driverid);
    if (cap == NULL) {
        CRYPTO_DRIVER_UNLOCK();
        return (EINVAL);
    }

    cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
    crypto_drivers[driverid] = NULL;

    /*
     * XXX: This doesn't do anything to kick sessions that
     * have no pending operations.
     */
    while (cap->cc_sessions != 0)
        mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0);
    CRYPTO_DRIVER_UNLOCK();
    cap_rele(cap);

    return (0);
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
1175 */ 1176 int 1177 crypto_unblock(uint32_t driverid, int what) 1178 { 1179 struct cryptocap *cap; 1180 int err; 1181 1182 CRYPTO_Q_LOCK(); 1183 cap = crypto_checkdriver(driverid); 1184 if (cap != NULL) { 1185 if (what & CRYPTO_SYMQ) 1186 cap->cc_qblocked = 0; 1187 if (crp_sleep) 1188 wakeup_one(&crp_q); 1189 err = 0; 1190 } else 1191 err = EINVAL; 1192 CRYPTO_Q_UNLOCK(); 1193 1194 return err; 1195 } 1196 1197 size_t 1198 crypto_buffer_len(struct crypto_buffer *cb) 1199 { 1200 switch (cb->cb_type) { 1201 case CRYPTO_BUF_CONTIG: 1202 return (cb->cb_buf_len); 1203 case CRYPTO_BUF_MBUF: 1204 if (cb->cb_mbuf->m_flags & M_PKTHDR) 1205 return (cb->cb_mbuf->m_pkthdr.len); 1206 return (m_length(cb->cb_mbuf, NULL)); 1207 case CRYPTO_BUF_SINGLE_MBUF: 1208 return (cb->cb_mbuf->m_len); 1209 case CRYPTO_BUF_VMPAGE: 1210 return (cb->cb_vm_page_len); 1211 case CRYPTO_BUF_UIO: 1212 return (cb->cb_uio->uio_resid); 1213 default: 1214 return (0); 1215 } 1216 } 1217 1218 #ifdef INVARIANTS 1219 /* Various sanity checks on crypto requests. */ 1220 static void 1221 cb_sanity(struct crypto_buffer *cb, const char *name) 1222 { 1223 KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST, 1224 ("incoming crp with invalid %s buffer type", name)); 1225 switch (cb->cb_type) { 1226 case CRYPTO_BUF_CONTIG: 1227 KASSERT(cb->cb_buf_len >= 0, 1228 ("incoming crp with -ve %s buffer length", name)); 1229 break; 1230 case CRYPTO_BUF_VMPAGE: 1231 KASSERT(CRYPTO_HAS_VMPAGE, 1232 ("incoming crp uses dmap on supported arch")); 1233 KASSERT(cb->cb_vm_page_len >= 0, 1234 ("incoming crp with -ve %s buffer length", name)); 1235 KASSERT(cb->cb_vm_page_offset >= 0, 1236 ("incoming crp with -ve %s buffer offset", name)); 1237 KASSERT(cb->cb_vm_page_offset < PAGE_SIZE, 1238 ("incoming crp with %s buffer offset greater than page size" 1239 , name)); 1240 break; 1241 default: 1242 break; 1243 } 1244 } 1245 1246 static void 1247 crp_sanity(struct cryptop *crp) 1248 { 1249 struct crypto_session_params *csp; 1250 struct crypto_buffer *out; 1251 size_t ilen, len, olen; 1252 1253 KASSERT(crp->crp_session != NULL, ("incoming crp without a session")); 1254 KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE && 1255 crp->crp_obuf.cb_type <= CRYPTO_BUF_LAST, 1256 ("incoming crp with invalid output buffer type")); 1257 KASSERT(crp->crp_etype == 0, ("incoming crp with error")); 1258 KASSERT(!(crp->crp_flags & CRYPTO_F_DONE), 1259 ("incoming crp already done")); 1260 1261 csp = &crp->crp_session->csp; 1262 cb_sanity(&crp->crp_buf, "input"); 1263 ilen = crypto_buffer_len(&crp->crp_buf); 1264 olen = ilen; 1265 out = NULL; 1266 if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) { 1267 if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) { 1268 cb_sanity(&crp->crp_obuf, "output"); 1269 out = &crp->crp_obuf; 1270 olen = crypto_buffer_len(out); 1271 } 1272 } else 1273 KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE, 1274 ("incoming crp with separate output buffer " 1275 "but no session support")); 1276 1277 switch (csp->csp_mode) { 1278 case CSP_MODE_COMPRESS: 1279 KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS || 1280 crp->crp_op == CRYPTO_OP_DECOMPRESS, 1281 ("invalid compression op %x", crp->crp_op)); 1282 break; 1283 case CSP_MODE_CIPHER: 1284 KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT || 1285 crp->crp_op == CRYPTO_OP_DECRYPT, 1286 ("invalid cipher op %x", crp->crp_op)); 1287 break; 1288 case CSP_MODE_DIGEST: 1289 KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST || 1290 crp->crp_op == CRYPTO_OP_VERIFY_DIGEST, 1291 ("invalid digest op %x", crp->crp_op)); 1292 
break; 1293 case CSP_MODE_AEAD: 1294 KASSERT(crp->crp_op == 1295 (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || 1296 crp->crp_op == 1297 (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), 1298 ("invalid AEAD op %x", crp->crp_op)); 1299 KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE, 1300 ("AEAD without a separate IV")); 1301 break; 1302 case CSP_MODE_ETA: 1303 KASSERT(crp->crp_op == 1304 (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || 1305 crp->crp_op == 1306 (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), 1307 ("invalid ETA op %x", crp->crp_op)); 1308 break; 1309 } 1310 if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { 1311 if (crp->crp_aad == NULL) { 1312 KASSERT(crp->crp_aad_start == 0 || 1313 crp->crp_aad_start < ilen, 1314 ("invalid AAD start")); 1315 KASSERT(crp->crp_aad_length != 0 || 1316 crp->crp_aad_start == 0, 1317 ("AAD with zero length and non-zero start")); 1318 KASSERT(crp->crp_aad_length == 0 || 1319 crp->crp_aad_start + crp->crp_aad_length <= ilen, 1320 ("AAD outside input length")); 1321 } else { 1322 KASSERT(csp->csp_flags & CSP_F_SEPARATE_AAD, 1323 ("session doesn't support separate AAD buffer")); 1324 KASSERT(crp->crp_aad_start == 0, 1325 ("separate AAD buffer with non-zero AAD start")); 1326 KASSERT(crp->crp_aad_length != 0, 1327 ("separate AAD buffer with zero length")); 1328 } 1329 } else { 1330 KASSERT(crp->crp_aad == NULL && crp->crp_aad_start == 0 && 1331 crp->crp_aad_length == 0, 1332 ("AAD region in request not supporting AAD")); 1333 } 1334 if (csp->csp_ivlen == 0) { 1335 KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0, 1336 ("IV_SEPARATE set when IV isn't used")); 1337 KASSERT(crp->crp_iv_start == 0, 1338 ("crp_iv_start set when IV isn't used")); 1339 } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) { 1340 KASSERT(crp->crp_iv_start == 0, 1341 ("IV_SEPARATE used with non-zero IV start")); 1342 } else { 1343 KASSERT(crp->crp_iv_start < ilen, 1344 ("invalid IV start")); 1345 KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen, 1346 ("IV outside buffer length")); 1347 } 1348 /* XXX: payload_start of 0 should always be < ilen? */ 1349 KASSERT(crp->crp_payload_start == 0 || 1350 crp->crp_payload_start < ilen, 1351 ("invalid payload start")); 1352 KASSERT(crp->crp_payload_start + crp->crp_payload_length <= 1353 ilen, ("payload outside input buffer")); 1354 if (out == NULL) { 1355 KASSERT(crp->crp_payload_output_start == 0, 1356 ("payload output start non-zero without output buffer")); 1357 } else { 1358 KASSERT(crp->crp_payload_output_start == 0 || 1359 crp->crp_payload_output_start < olen, 1360 ("invalid payload output start")); 1361 KASSERT(crp->crp_payload_output_start + 1362 crp->crp_payload_length <= olen, 1363 ("payload outside output buffer")); 1364 } 1365 if (csp->csp_mode == CSP_MODE_DIGEST || 1366 csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { 1367 if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) 1368 len = ilen; 1369 else 1370 len = olen; 1371 KASSERT(crp->crp_digest_start == 0 || 1372 crp->crp_digest_start < len, 1373 ("invalid digest start")); 1374 /* XXX: For the mlen == 0 case this check isn't perfect. 
*/
        KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len,
            ("digest outside buffer"));
    } else {
        KASSERT(crp->crp_digest_start == 0,
            ("non-zero digest start for request without a digest"));
    }
    if (csp->csp_cipher_klen != 0)
        KASSERT(csp->csp_cipher_key != NULL ||
            crp->crp_cipher_key != NULL,
            ("cipher request without a key"));
    if (csp->csp_auth_klen != 0)
        KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL,
            ("auth request without a key"));
    KASSERT(crp->crp_callback != NULL, ("incoming crp without callback"));
}
#endif

static int
crypto_dispatch_one(struct cryptop *crp, int hint)
{
    struct cryptocap *cap;
    int result;

#ifdef INVARIANTS
    crp_sanity(crp);
#endif
    CRYPTOSTAT_INC(cs_ops);

    crp->crp_retw_id = crp->crp_session->id % crypto_workers_num;

    /*
     * Caller marked the request to be processed immediately; dispatch it
     * directly to the driver unless the driver is currently blocked, in
     * which case it is queued for deferred dispatch.
     */
    cap = crp->crp_session->cap;
    if (!atomic_load_int(&cap->cc_qblocked)) {
        result = crypto_invoke(cap, crp, hint);
        if (result != ERESTART)
            return (result);

        /*
         * The driver ran out of resources, put the request on the
         * queue.
         */
    }
    crypto_batch_enqueue(crp);
    return (0);
}

int
crypto_dispatch(struct cryptop *crp)
{
    return (crypto_dispatch_one(crp, 0));
}

int
crypto_dispatch_async(struct cryptop *crp, int flags)
{
    struct crypto_ret_worker *ret_worker;

    if (!CRYPTO_SESS_SYNC(crp->crp_session)) {
        /*
         * The driver issues completions asynchronously; don't bother
         * deferring dispatch to a worker thread.
         */
        return (crypto_dispatch(crp));
    }

#ifdef INVARIANTS
    crp_sanity(crp);
#endif
    CRYPTOSTAT_INC(cs_ops);

    crp->crp_retw_id = crp->crp_session->id % crypto_workers_num;
    if ((flags & CRYPTO_ASYNC_ORDERED) != 0) {
        crp->crp_flags |= CRYPTO_F_ASYNC_ORDERED;
        ret_worker = CRYPTO_RETW(crp->crp_retw_id);
        CRYPTO_RETW_LOCK(ret_worker);
        crp->crp_seq = ret_worker->reorder_ops++;
        CRYPTO_RETW_UNLOCK(ret_worker);
    }
    TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp);
    taskqueue_enqueue(crypto_tq, &crp->crp_task);
    return (0);
}

void
crypto_dispatch_batch(struct cryptopq *crpq, int flags)
{
    struct cryptop *crp;
    int hint;

    while ((crp = TAILQ_FIRST(crpq)) != NULL) {
        hint = TAILQ_NEXT(crp, crp_next) != NULL ? CRYPTO_HINT_MORE : 0;
        TAILQ_REMOVE(crpq, crp, crp_next);
        if (crypto_dispatch_one(crp, hint) != 0)
            crypto_batch_enqueue(crp);
    }
}

static void
crypto_batch_enqueue(struct cryptop *crp)
{

    CRYPTO_Q_LOCK();
    TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
    if (crp_sleep)
        wakeup_one(&crp_q);
    CRYPTO_Q_UNLOCK();
}

static void
crypto_task_invoke(void *ctx, int pending)
{
    struct cryptocap *cap;
    struct cryptop *crp;
    int result;

    crp = (struct cryptop *)ctx;
    cap = crp->crp_session->cap;
    result = crypto_invoke(cap, crp, 0);
    if (result == ERESTART)
        crypto_batch_enqueue(crp);
}

/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
{

    KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
    KASSERT(crp->crp_callback != NULL,
        ("%s: crp->crp_callback == NULL", __func__));
    KASSERT(crp->crp_session != NULL,
        ("%s: crp->crp_session == NULL", __func__));

    if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
        struct crypto_session_params csp;
        crypto_session_t nses;

        /*
         * Driver has unregistered; migrate the session and return
         * an error to the caller so they'll resubmit the op.
         *
         * XXX: What if there are more already queued requests for this
         * session?
         *
         * XXX: Real solution is to make sessions refcounted
         * and force callers to hold a reference when
         * assigning to crp_session.  Could maybe change
         * crypto_getreq to accept a session pointer to make
         * that work.  Alternatively, we could abandon the
         * notion of rewriting crp_session in requests forcing
         * the caller to deal with allocating a new session.
         * Perhaps provide a method to allow a crp's session to
         * be swapped that callers could use.
         */
        csp = crp->crp_session->csp;
        crypto_freesession(crp->crp_session);

        /*
         * XXX: Key pointers may no longer be valid.  If we
         * really want to support this we need to define the
         * KPI such that 'csp' is required to be valid for the
         * duration of a session by the caller perhaps.
         *
         * XXX: If the keys have been changed this will reuse
         * the old keys.  This probably suggests making
         * rekeying more explicit and updating the key
         * pointers in 'csp' when the keys change.
         */
        if (crypto_newsession(&nses, &csp,
            CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
            crp->crp_session = nses;

        crp->crp_etype = EAGAIN;
        crypto_done(crp);
        return 0;
    } else {
        /*
         * Invoke the driver to process the request.
         */
        return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
    }
}

void
crypto_destroyreq(struct cryptop *crp)
{
#ifdef DIAGNOSTIC
    {
        struct cryptop *crp2;
        struct crypto_ret_worker *ret_worker;

        CRYPTO_Q_LOCK();
        TAILQ_FOREACH(crp2, &crp_q, crp_next) {
            KASSERT(crp2 != crp,
                ("Freeing cryptop from the crypto queue (%p).",
                crp));
        }
        CRYPTO_Q_UNLOCK();

        FOREACH_CRYPTO_RETW(ret_worker) {
            CRYPTO_RETW_LOCK(ret_worker);
            TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) {
                KASSERT(crp2 != crp,
                    ("Freeing cryptop from the return queue (%p).",
                    crp));
            }
            CRYPTO_RETW_UNLOCK(ret_worker);
        }
    }
#endif
}

void
crypto_freereq(struct cryptop *crp)
{
    if (crp == NULL)
        return;

    crypto_destroyreq(crp);
    uma_zfree(cryptop_zone, crp);
}

static void
_crypto_initreq(struct cryptop *crp, crypto_session_t cses)
{
    crp->crp_session = cses;
}

void
crypto_initreq(struct cryptop *crp, crypto_session_t cses)
{
    memset(crp, 0, sizeof(*crp));
    _crypto_initreq(crp, cses);
}

struct cryptop *
crypto_getreq(crypto_session_t cses, int how)
{
    struct cryptop *crp;

    MPASS(how == M_WAITOK || how == M_NOWAIT);
    crp = uma_zalloc(cryptop_zone, how | M_ZERO);
    if (crp != NULL)
        _crypto_initreq(crp, cses);
    return (crp);
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
    KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
        ("crypto_done: op already done, flags 0x%x", crp->crp_flags));
    crp->crp_flags |= CRYPTO_F_DONE;
    if (crp->crp_etype != 0)
        CRYPTOSTAT_INC(cs_errs);

    /*
     * CBIMM means unconditionally do the callback immediately;
     * CBIFSYNC means do the callback immediately only if the
     * operation was done synchronously.  Both are used to avoid
     * doing extraneous context switches; the latter is mostly
     * used with the software crypto driver.
     */
    if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) == 0 &&
        ((crp->crp_flags & CRYPTO_F_CBIMM) != 0 ||
        ((crp->crp_flags & CRYPTO_F_CBIFSYNC) != 0 &&
        CRYPTO_SESS_SYNC(crp->crp_session)))) {
        /*
         * Do the callback directly.  This is ok when the
         * callback routine does very little (e.g. the
         * /dev/crypto callback method just does a wakeup).
         */
        crp->crp_callback(crp);
    } else {
        struct crypto_ret_worker *ret_worker;
        bool wake;

        ret_worker = CRYPTO_RETW(crp->crp_retw_id);

        /*
         * Normal case; queue the callback for the thread.
         */
        CRYPTO_RETW_LOCK(ret_worker);
        if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) != 0) {
            struct cryptop *tmp;

            TAILQ_FOREACH_REVERSE(tmp,
                &ret_worker->crp_ordered_ret_q, cryptop_q,
                crp_next) {
                if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) {
                    TAILQ_INSERT_AFTER(
                        &ret_worker->crp_ordered_ret_q, tmp,
                        crp, crp_next);
                    break;
                }
            }
            if (tmp == NULL) {
                TAILQ_INSERT_HEAD(
                    &ret_worker->crp_ordered_ret_q, crp,
                    crp_next);
            }

            wake = crp->crp_seq == ret_worker->reorder_cur_seq;
        } else {
            wake = TAILQ_EMPTY(&ret_worker->crp_ret_q);
            TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp,
                crp_next);
        }

        if (wake)
            wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */
        CRYPTO_RETW_UNLOCK(ret_worker);
    }
}

/*
 * Terminate a thread at module unload.  The process that
 * initiated this is waiting for us to signal that we're gone;
 * wake it up and exit.  We use the driver table lock to ensure
 * we don't do the wakeup before they're waiting.  There is no
 * race here because the waiter sleeps on the proc lock for the
 * thread so it gets notified at the right time because of an
 * extra wakeup that's done in exit1().
 */
static void
crypto_finis(void *chan)
{
    CRYPTO_DRIVER_LOCK();
    wakeup_one(chan);
    CRYPTO_DRIVER_UNLOCK();
    kthread_exit();
}

/*
 * Crypto thread, dispatches crypto requests.
 */
static void
crypto_dispatch_thread(void *arg __unused)
{
    struct cryptop *crp, *submit;
    struct cryptocap *cap;
    int result, hint;

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
    fpu_kern_thread(FPU_KERN_NORMAL);
#endif

    CRYPTO_Q_LOCK();
    for (;;) {
        /*
         * Find the first element in the queue that can be
         * processed and look-ahead to see if multiple ops
         * are ready for the same driver.
         */
        submit = NULL;
        hint = 0;
        TAILQ_FOREACH(crp, &crp_q, crp_next) {
            cap = crp->crp_session->cap;
            /*
             * The driver cannot disappear when there is an
             * active session.
             */
            KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
                __func__, __LINE__));
            if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
                /* Op needs to be migrated, process it. */
                if (submit == NULL)
                    submit = crp;
                break;
            }
            if (!cap->cc_qblocked) {
                if (submit != NULL) {
                    /*
                     * We stop on finding another op,
                     * regardless of whether it is for the
                     * same driver or not.  We could keep
                     * searching the queue but it might be
                     * better to just use a per-driver
                     * queue instead.
                     */
                    if (submit->crp_session->cap == cap)
                        hint = CRYPTO_HINT_MORE;
                } else {
                    submit = crp;
                }
                break;
            }
        }
        if (submit != NULL) {
            TAILQ_REMOVE(&crp_q, submit, crp_next);
            cap = submit->crp_session->cap;
            KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
                __func__, __LINE__));
            CRYPTO_Q_UNLOCK();
            result = crypto_invoke(cap, submit, hint);
            CRYPTO_Q_LOCK();
            if (result == ERESTART) {
                /*
                 * The driver ran out of resources, mark the
                 * driver ``blocked'' for cryptop's and put
                 * the request back in the queue.  It would
                 * be best to put the request back where we
                 * got it but that's hard so for now we put
                 * it at the front.
                 * This should be ok; putting it at the end
                 * does not work as well.
                 */
                cap->cc_qblocked = 1;
                TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
                CRYPTOSTAT_INC(cs_blocks);
            }
        } else {
            /*
             * Nothing more to be processed.  Sleep until we're
             * woken because there are more ops to process.
             * This happens either by submission or by a driver
             * becoming unblocked and notifying us through
             * crypto_unblock.  Note that when we wakeup we
             * start processing each queue again from the
             * front.  It's not clear that it's important to
             * preserve this ordering since ops may finish
             * out of order if dispatched to different devices
             * and some become blocked while others do not.
             */
            crp_sleep = 1;
            msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
            crp_sleep = 0;
            if (cryptotd == NULL)
                break;
            CRYPTOSTAT_INC(cs_intrs);
        }
    }
    CRYPTO_Q_UNLOCK();

    crypto_finis(&crp_q);
}

/*
 * Crypto returns thread, does callbacks for processed crypto requests.
 * Callbacks are done here, rather than in the crypto drivers, because
 * callbacks typically are expensive and would slow interrupt handling.
 */
static void
crypto_ret_thread(void *arg)
{
    struct crypto_ret_worker *ret_worker = arg;
    struct cryptop *crpt;

    CRYPTO_RETW_LOCK(ret_worker);
    for (;;) {
        /* Harvest return q's for completed ops */
        crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q);
        if (crpt != NULL) {
            if (crpt->crp_seq == ret_worker->reorder_cur_seq) {
                TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q,
                    crpt, crp_next);
                ret_worker->reorder_cur_seq++;
            } else {
                crpt = NULL;
            }
        }

        if (crpt == NULL) {
            crpt = TAILQ_FIRST(&ret_worker->crp_ret_q);
            if (crpt != NULL)
                TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt,
                    crp_next);
        }

        if (crpt != NULL) {
            CRYPTO_RETW_UNLOCK(ret_worker);
            /*
             * Run callbacks unlocked.
             */
            crpt->crp_callback(crpt);
            CRYPTO_RETW_LOCK(ret_worker);
        } else {
            /*
             * Nothing more to be processed.  Sleep until we're
             * woken because there are more returns to process.
1862 */ 1863 msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT, 1864 "crypto_ret_wait", 0); 1865 if (ret_worker->td == NULL) 1866 break; 1867 CRYPTOSTAT_INC(cs_rets); 1868 } 1869 } 1870 CRYPTO_RETW_UNLOCK(ret_worker); 1871 1872 crypto_finis(&ret_worker->crp_ret_q); 1873 } 1874 1875 #ifdef DDB 1876 static void 1877 db_show_drivers(void) 1878 { 1879 int hid; 1880 1881 db_printf("%12s %4s %8s %2s\n" 1882 , "Device" 1883 , "Ses" 1884 , "Flags" 1885 , "QB" 1886 ); 1887 for (hid = 0; hid < crypto_drivers_size; hid++) { 1888 const struct cryptocap *cap = crypto_drivers[hid]; 1889 if (cap == NULL) 1890 continue; 1891 db_printf("%-12s %4u %08x %2u\n" 1892 , device_get_nameunit(cap->cc_dev) 1893 , cap->cc_sessions 1894 , cap->cc_flags 1895 , cap->cc_qblocked 1896 ); 1897 } 1898 } 1899 1900 DB_SHOW_COMMAND(crypto, db_show_crypto) 1901 { 1902 struct cryptop *crp; 1903 struct crypto_ret_worker *ret_worker; 1904 1905 db_show_drivers(); 1906 db_printf("\n"); 1907 1908 db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n", 1909 "HID", "Caps", "Ilen", "Olen", "Etype", "Flags", 1910 "Device", "Callback"); 1911 TAILQ_FOREACH(crp, &crp_q, crp_next) { 1912 db_printf("%4u %08x %4u %4u %04x %8p %8p\n" 1913 , crp->crp_session->cap->cc_hid 1914 , (int) crypto_ses2caps(crp->crp_session) 1915 , crp->crp_olen 1916 , crp->crp_etype 1917 , crp->crp_flags 1918 , device_get_nameunit(crp->crp_session->cap->cc_dev) 1919 , crp->crp_callback 1920 ); 1921 } 1922 FOREACH_CRYPTO_RETW(ret_worker) { 1923 db_printf("\n%8s %4s %4s %4s %8s\n", 1924 "ret_worker", "HID", "Etype", "Flags", "Callback"); 1925 if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { 1926 TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) { 1927 db_printf("%8td %4u %4u %04x %8p\n" 1928 , CRYPTO_RETW_ID(ret_worker) 1929 , crp->crp_session->cap->cc_hid 1930 , crp->crp_etype 1931 , crp->crp_flags 1932 , crp->crp_callback 1933 ); 1934 } 1935 } 1936 } 1937 } 1938 #endif 1939 1940 int crypto_modevent(module_t mod, int type, void *unused); 1941 1942 /* 1943 * Initialization code, both for static and dynamic loading. 1944 * Note this is not invoked with the usual MODULE_DECLARE 1945 * mechanism but instead is listed as a dependency by the 1946 * cryptosoft driver. This guarantees proper ordering of 1947 * calls on module load/unload. 1948 */ 1949 int 1950 crypto_modevent(module_t mod, int type, void *unused) 1951 { 1952 int error = EINVAL; 1953 1954 switch (type) { 1955 case MOD_LOAD: 1956 error = crypto_init(); 1957 if (error == 0 && bootverbose) 1958 printf("crypto: <crypto core>\n"); 1959 break; 1960 case MOD_UNLOAD: 1961 /*XXX disallow if active sessions */ 1962 error = 0; 1963 crypto_destroy(); 1964 return 0; 1965 } 1966 return error; 1967 } 1968 MODULE_VERSION(crypto, 1); 1969 MODULE_DEPEND(crypto, zlib, 1, 1, 1); 1970