/*-
 * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
 * Copyright (c) 2021 The FreeBSD Foundation
 *
 * Portions of this software were developed by Ararat River
 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Cryptographic Subsystem.
 *
 * This code is derived from the OpenBSD Cryptographic Framework (OCF)
 * that has the copyright shown below.  Very little of the original
 * code remains.
 */

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000.  Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY.  IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>

#include <ddb/ddb.h>

#include <machine/vmparam.h>
#include <vm/uma.h>

#include <crypto/intake.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform_auth.h>
#include <opencrypto/xform_enc.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
#include <machine/pcb.h>
#endif

SDT_PROVIDER_DEFINE(opencrypto);

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid().
 */
static struct mtx crypto_drivers_mtx;	/* lock on driver table */
#define	CRYPTO_DRIVER_LOCK()	mtx_lock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_UNLOCK()	mtx_unlock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_ASSERT()	mtx_assert(&crypto_drivers_mtx, MA_OWNED)

/*
 * Crypto device/driver capabilities structure.
 *
 * Synchronization:
 * (d) - protected by CRYPTO_DRIVER_LOCK()
 * (q) - protected by CRYPTO_Q_LOCK()
 * Untagged fields are read-only.
 */
struct cryptocap {
	device_t	cc_dev;
	uint32_t	cc_hid;
	uint32_t	cc_sessions;		/* (d) # of sessions */

	int		cc_flags;		/* (d) flags */
#define	CRYPTOCAP_F_CLEANUP	0x80000000	/* needs resource cleanup */
	int		cc_qblocked;		/* (q) symmetric q blocked */
	size_t		cc_session_size;
	volatile int	cc_refs;
};

static struct cryptocap **crypto_drivers = NULL;
static int crypto_drivers_size = 0;

struct crypto_session {
	struct cryptocap *cap;
	struct crypto_session_params csp;
	uint64_t id;
	/* Driver softc follows. */
};

static int crp_sleep = 0;
static TAILQ_HEAD(cryptop_q, cryptop) crp_q;	/* request queues */
static struct mtx crypto_q_mtx;
#define	CRYPTO_Q_LOCK()		mtx_lock(&crypto_q_mtx)
#define	CRYPTO_Q_UNLOCK()	mtx_unlock(&crypto_q_mtx)

SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "In-kernel cryptography");

/*
 * Taskqueue used to dispatch the crypto requests submitted with
 * crypto_dispatch_async().
 */
static struct taskqueue *crypto_tq;

/*
 * Crypto seq numbers are operated on with modular arithmetic.
 */
#define	CRYPTO_SEQ_GT(a, b)	((int)((a) - (b)) > 0)

struct crypto_ret_worker {
	struct mtx crypto_ret_mtx;

	TAILQ_HEAD(, cryptop) crp_ordered_ret_q;	/* ordered callback queue for symmetric jobs */
	TAILQ_HEAD(, cryptop) crp_ret_q;		/* callback queue for symmetric jobs */

	uint32_t reorder_ops;		/* total ordered sym jobs received */
	uint32_t reorder_cur_seq;	/* current sym job dispatched */

	struct thread *td;
};
static struct crypto_ret_worker *crypto_ret_workers = NULL;

#define	CRYPTO_RETW(i)		(&crypto_ret_workers[i])
#define	CRYPTO_RETW_ID(w)	((w) - crypto_ret_workers)
#define	FOREACH_CRYPTO_RETW(w) \
	for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w)

#define	CRYPTO_RETW_LOCK(w)	mtx_lock(&w->crypto_ret_mtx)
#define	CRYPTO_RETW_UNLOCK(w)	mtx_unlock(&w->crypto_ret_mtx)

static int crypto_workers_num = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN,
    &crypto_workers_num, 0,
    "Number of crypto workers used to dispatch crypto jobs");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN,
    &crypto_workers_num, 0,
    "Number of crypto workers used to dispatch crypto jobs");
#endif

static uma_zone_t cryptop_zone;

int crypto_devallowsoft = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RWTUN,
    &crypto_devallowsoft, 0,
    "Enable use of software crypto by /dev/crypto");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RWTUN,
    &crypto_devallowsoft, 0,
    "Enable/disable use of software crypto by /dev/crypto");
#endif

#ifdef DIAGNOSTIC
bool crypto_destroyreq_check;
SYSCTL_BOOL(_kern_crypto, OID_AUTO, destroyreq_check, CTLFLAG_RWTUN,
    &crypto_destroyreq_check, 0,
    "Enable checks when destroying a request");
#endif

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

static void crypto_dispatch_thread(void *arg);
static struct thread *cryptotd;
static void crypto_ret_thread(void *arg);
static void crypto_destroy(void);
static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
static void crypto_task_invoke(void *ctx, int pending);
static void crypto_batch_enqueue(struct cryptop *crp);

static counter_u64_t cryptostats[sizeof(struct cryptostats) / sizeof(uint64_t)];
SYSCTL_COUNTER_U64_ARRAY(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW,
    cryptostats, nitems(cryptostats),
    "Crypto system statistics");

#define	CRYPTOSTAT_INC(stat) do {					\
	counter_u64_add(						\
	    cryptostats[offsetof(struct cryptostats, stat) / sizeof(uint64_t)],\
	    1);								\
} while (0)

static void
cryptostats_init(void *arg __unused)
{
	COUNTER_ARRAY_ALLOC(cryptostats, nitems(cryptostats), M_WAITOK);
}
SYSINIT(cryptostats_init, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_init, NULL);

static void
cryptostats_fini(void *arg __unused)
{
	COUNTER_ARRAY_FREE(cryptostats, nitems(cryptostats));
}
SYSUNINIT(cryptostats_fini, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_fini,
    NULL);
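
/*
 * Note: struct cryptostats (see cryptodev.h) is laid out as a sequence
 * of uint64_t fields, so CRYPTOSTAT_INC() can map a field name to a
 * slot in the cryptostats[] counter(9) array using offsetof().  For
 * example, CRYPTOSTAT_INC(cs_ops) bumps the per-CPU counter shadowing
 * the cs_ops field.
 */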

/* Try to avoid directly exposing the key buffer as a symbol */
static struct keybuf *keybuf;

static struct keybuf empty_keybuf = {
	.kb_nents = 0
};

/* Obtain the key buffer from boot metadata */
static void
keybuf_init(void)
{
	caddr_t kmdp;

	kmdp = preload_search_by_type("elf kernel");

	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	keybuf = (struct keybuf *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_KEYBUF);

	if (keybuf == NULL)
		keybuf = &empty_keybuf;
}

/* It'd be nice if we could store these in some kind of secure memory... */
struct keybuf *
get_keybuf(void)
{

	return (keybuf);
}

static struct cryptocap *
cap_ref(struct cryptocap *cap)
{

	refcount_acquire(&cap->cc_refs);
	return (cap);
}

static void
cap_rele(struct cryptocap *cap)
{

	if (refcount_release(&cap->cc_refs) == 0)
		return;

	KASSERT(cap->cc_sessions == 0,
	    ("freeing crypto driver with active sessions"));

	free(cap, M_CRYPTO_DATA);
}

static int
crypto_init(void)
{
	struct crypto_ret_worker *ret_worker;
	struct proc *p;
	int error;

	mtx_init(&crypto_drivers_mtx, "crypto driver table", NULL, MTX_DEF);

	TAILQ_INIT(&crp_q);
	mtx_init(&crypto_q_mtx, "crypto op queues", NULL, MTX_DEF);

	cryptop_zone = uma_zcreate("cryptop",
	    sizeof(struct cryptop), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	crypto_drivers_size = CRYPTO_DRIVERS_INITIAL;
	crypto_drivers = malloc(crypto_drivers_size *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

	if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus)
		crypto_workers_num = mp_ncpus;

	crypto_tq = taskqueue_create("crypto", M_WAITOK | M_ZERO,
	    taskqueue_thread_enqueue, &crypto_tq);

	taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN,
	    "crypto");

	p = NULL;
	error = kproc_kthread_add(crypto_dispatch_thread, NULL, &p, &cryptotd,
	    0, 0, "crypto", "crypto");
	if (error) {
		printf("crypto_init: cannot start crypto thread; error %d\n",
		    error);
		goto bad;
	}

	crypto_ret_workers = mallocarray(crypto_workers_num,
	    sizeof(struct crypto_ret_worker), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

	FOREACH_CRYPTO_RETW(ret_worker) {
		TAILQ_INIT(&ret_worker->crp_ordered_ret_q);
		TAILQ_INIT(&ret_worker->crp_ret_q);

		ret_worker->reorder_ops = 0;
		ret_worker->reorder_cur_seq = 0;

		mtx_init(&ret_worker->crypto_ret_mtx, "crypto return queues",
		    NULL, MTX_DEF);

		error = kthread_add(crypto_ret_thread, ret_worker, p,
		    &ret_worker->td, 0, 0, "crypto returns %td",
		    CRYPTO_RETW_ID(ret_worker));
		if (error) {
			printf("crypto_init: cannot start cryptoret thread; error %d\n",
			    error);
			goto bad;
		}
	}

	keybuf_init();

	return (0);
bad:
	crypto_destroy();
	return (error);
}
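
/*
 * Threading model (summary): crypto_init() creates one "crypto"
 * dispatch kthread that services the crp_q request queue, a "crypto"
 * taskqueue with crypto_workers_num threads used by
 * crypto_dispatch_async(), and crypto_workers_num return workers, each
 * running crypto_ret_thread() to deliver completion callbacks from its
 * ordered and unordered queues.
 */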

/*
 * Signal a crypto thread to terminate.  We use the driver
 * table lock to synchronize the sleep/wakeups so that we
 * are sure the threads have terminated before we release
 * the data structures they use.  See crypto_finis below
 * for the other half of this song-and-dance.
 */
static void
crypto_terminate(struct thread **tdp, void *q)
{
	struct thread *td;

	mtx_assert(&crypto_drivers_mtx, MA_OWNED);
	td = *tdp;
	*tdp = NULL;
	if (td != NULL) {
		wakeup_one(q);
		mtx_sleep(td, &crypto_drivers_mtx, PWAIT, "crypto_destroy", 0);
	}
}

static void
hmac_init_pad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx, uint8_t padval)
{
	uint8_t hmac_key[HMAC_MAX_BLOCK_LEN];
	u_int i;

	KASSERT(axf->blocksize <= sizeof(hmac_key),
	    ("Invalid HMAC block size %d", axf->blocksize));

	/*
	 * If the key is larger than the block size, use the digest of
	 * the key as the key instead.
	 */
	memset(hmac_key, 0, sizeof(hmac_key));
	if (klen > axf->blocksize) {
		axf->Init(auth_ctx);
		axf->Update(auth_ctx, key, klen);
		axf->Final(hmac_key, auth_ctx);
		klen = axf->hashsize;
	} else
		memcpy(hmac_key, key, klen);

	for (i = 0; i < axf->blocksize; i++)
		hmac_key[i] ^= padval;

	axf->Init(auth_ctx);
	axf->Update(auth_ctx, hmac_key, axf->blocksize);
	explicit_bzero(hmac_key, sizeof(hmac_key));
}

void
hmac_init_ipad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx)
{

	hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL);
}

void
hmac_init_opad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx)
{

	hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL);
}
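
/*
 * Background: HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)), per
 * RFC 2104.  hmac_init_ipad() and hmac_init_opad() leave 'auth_ctx'
 * holding the hash state with the padded key block already absorbed,
 * so a driver can clone those two precomputed states for each request
 * and only pay for hashing the message and the inner digest.
 */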

static void
crypto_destroy(void)
{
	struct crypto_ret_worker *ret_worker;
	int i;

	/*
	 * Terminate any crypto threads.
	 */
	if (crypto_tq != NULL)
		taskqueue_drain_all(crypto_tq);
	CRYPTO_DRIVER_LOCK();
	crypto_terminate(&cryptotd, &crp_q);
	FOREACH_CRYPTO_RETW(ret_worker)
		crypto_terminate(&ret_worker->td, &ret_worker->crp_ret_q);
	CRYPTO_DRIVER_UNLOCK();

	/* XXX flush queues??? */

	/*
	 * Reclaim dynamically allocated resources.
	 */
	for (i = 0; i < crypto_drivers_size; i++) {
		if (crypto_drivers[i] != NULL)
			cap_rele(crypto_drivers[i]);
	}
	free(crypto_drivers, M_CRYPTO_DATA);

	if (cryptop_zone != NULL)
		uma_zdestroy(cryptop_zone);
	mtx_destroy(&crypto_q_mtx);
	FOREACH_CRYPTO_RETW(ret_worker)
		mtx_destroy(&ret_worker->crypto_ret_mtx);
	free(crypto_ret_workers, M_CRYPTO_DATA);
	if (crypto_tq != NULL)
		taskqueue_free(crypto_tq);
	mtx_destroy(&crypto_drivers_mtx);
}

uint32_t
crypto_ses2hid(crypto_session_t crypto_session)
{
	return (crypto_session->cap->cc_hid);
}

uint32_t
crypto_ses2caps(crypto_session_t crypto_session)
{
	return (crypto_session->cap->cc_flags & 0xff000000);
}

void *
crypto_get_driver_session(crypto_session_t crypto_session)
{
	return (crypto_session + 1);
}

const struct crypto_session_params *
crypto_get_params(crypto_session_t crypto_session)
{
	return (&crypto_session->csp);
}

const struct auth_hash *
crypto_auth_hash(const struct crypto_session_params *csp)
{

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
		return (&auth_hash_hmac_sha1);
	case CRYPTO_SHA2_224_HMAC:
		return (&auth_hash_hmac_sha2_224);
	case CRYPTO_SHA2_256_HMAC:
		return (&auth_hash_hmac_sha2_256);
	case CRYPTO_SHA2_384_HMAC:
		return (&auth_hash_hmac_sha2_384);
	case CRYPTO_SHA2_512_HMAC:
		return (&auth_hash_hmac_sha2_512);
	case CRYPTO_NULL_HMAC:
		return (&auth_hash_null);
	case CRYPTO_RIPEMD160_HMAC:
		return (&auth_hash_hmac_ripemd_160);
	case CRYPTO_RIPEMD160:
		return (&auth_hash_ripemd_160);
	case CRYPTO_SHA1:
		return (&auth_hash_sha1);
	case CRYPTO_SHA2_224:
		return (&auth_hash_sha2_224);
	case CRYPTO_SHA2_256:
		return (&auth_hash_sha2_256);
	case CRYPTO_SHA2_384:
		return (&auth_hash_sha2_384);
	case CRYPTO_SHA2_512:
		return (&auth_hash_sha2_512);
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen) {
		case 128 / 8:
			return (&auth_hash_nist_gmac_aes_128);
		case 192 / 8:
			return (&auth_hash_nist_gmac_aes_192);
		case 256 / 8:
			return (&auth_hash_nist_gmac_aes_256);
		default:
			return (NULL);
		}
	case CRYPTO_BLAKE2B:
		return (&auth_hash_blake2b);
	case CRYPTO_BLAKE2S:
		return (&auth_hash_blake2s);
	case CRYPTO_POLY1305:
		return (&auth_hash_poly1305);
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen) {
		case 128 / 8:
			return (&auth_hash_ccm_cbc_mac_128);
		case 192 / 8:
			return (&auth_hash_ccm_cbc_mac_192);
		case 256 / 8:
			return (&auth_hash_ccm_cbc_mac_256);
		default:
			return (NULL);
		}
	default:
		return (NULL);
	}
}
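
/*
 * crypto_auth_hash() above and crypto_cipher() below map the algorithm
 * constants in a session's parameters to the software transform
 * descriptors from xform_auth.h and xform_enc.h.  Callers can use the
 * returned descriptor to look up properties such as block size, key
 * size limits, and digest length for the selected algorithm.
 */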
const struct enc_xform *
crypto_cipher(const struct crypto_session_params *csp)
{

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		return (&enc_xform_aes_cbc);
	case CRYPTO_AES_XTS:
		return (&enc_xform_aes_xts);
	case CRYPTO_AES_ICM:
		return (&enc_xform_aes_icm);
	case CRYPTO_AES_NIST_GCM_16:
		return (&enc_xform_aes_nist_gcm);
	case CRYPTO_CAMELLIA_CBC:
		return (&enc_xform_camellia);
	case CRYPTO_NULL_CBC:
		return (&enc_xform_null);
	case CRYPTO_CHACHA20:
		return (&enc_xform_chacha20);
	case CRYPTO_AES_CCM_16:
		return (&enc_xform_ccm);
	case CRYPTO_CHACHA20_POLY1305:
		return (&enc_xform_chacha20_poly1305);
	case CRYPTO_XCHACHA20_POLY1305:
		return (&enc_xform_xchacha20_poly1305);
	default:
		return (NULL);
	}
}

static struct cryptocap *
crypto_checkdriver(uint32_t hid)
{

	return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]);
}

/*
 * Select a driver for a new session that supports the specified
 * algorithms and, optionally, is constrained according to the flags.
 */
static struct cryptocap *
crypto_select_driver(const struct crypto_session_params *csp, int flags)
{
	struct cryptocap *cap, *best;
	int best_match, error, hid;

	CRYPTO_DRIVER_ASSERT();

	best = NULL;
	for (hid = 0; hid < crypto_drivers_size; hid++) {
		/*
		 * If there is no driver for this slot, or the driver
		 * is not appropriate (hardware or software based on
		 * match), then skip.
		 */
		cap = crypto_drivers[hid];
		if (cap == NULL ||
		    (cap->cc_flags & flags) == 0)
			continue;

		error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp);
		if (error >= 0)
			continue;

		/*
		 * Use the driver with the highest probe value.
		 * Hardware drivers use a higher probe value than
		 * software.  In case of a tie, prefer the driver with
		 * the fewest active sessions.
		 */
		if (best == NULL || error > best_match ||
		    (error == best_match &&
		    cap->cc_sessions < best->cc_sessions)) {
			best = cap;
			best_match = error;
		}
	}
	return (best);
}

static enum alg_type {
	ALG_NONE = 0,
	ALG_CIPHER,
	ALG_DIGEST,
	ALG_KEYED_DIGEST,
	ALG_COMPRESSION,
	ALG_AEAD
} alg_types[] = {
	[CRYPTO_SHA1_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_RIPEMD160_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CBC] = ALG_CIPHER,
	[CRYPTO_SHA1] = ALG_DIGEST,
	[CRYPTO_NULL_HMAC] = ALG_DIGEST,
	[CRYPTO_NULL_CBC] = ALG_CIPHER,
	[CRYPTO_DEFLATE_COMP] = ALG_COMPRESSION,
	[CRYPTO_SHA2_256_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_SHA2_384_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_SHA2_512_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_CAMELLIA_CBC] = ALG_CIPHER,
	[CRYPTO_AES_XTS] = ALG_CIPHER,
	[CRYPTO_AES_ICM] = ALG_CIPHER,
	[CRYPTO_AES_NIST_GMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_NIST_GCM_16] = ALG_AEAD,
	[CRYPTO_BLAKE2B] = ALG_KEYED_DIGEST,
	[CRYPTO_BLAKE2S] = ALG_KEYED_DIGEST,
	[CRYPTO_CHACHA20] = ALG_CIPHER,
	[CRYPTO_SHA2_224_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_RIPEMD160] = ALG_DIGEST,
	[CRYPTO_SHA2_224] = ALG_DIGEST,
	[CRYPTO_SHA2_256] = ALG_DIGEST,
	[CRYPTO_SHA2_384] = ALG_DIGEST,
	[CRYPTO_SHA2_512] = ALG_DIGEST,
	[CRYPTO_POLY1305] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CCM_CBC_MAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CCM_16] = ALG_AEAD,
	[CRYPTO_CHACHA20_POLY1305] = ALG_AEAD,
	[CRYPTO_XCHACHA20_POLY1305] = ALG_AEAD,
};

static enum alg_type
alg_type(int alg)
{

	if (alg < nitems(alg_types))
		return (alg_types[alg]);
	return (ALG_NONE);
}

static bool
alg_is_compression(int alg)
{

	return (alg_type(alg) == ALG_COMPRESSION);
}

static bool
alg_is_cipher(int alg)
{

	return (alg_type(alg) == ALG_CIPHER);
}

static bool
alg_is_digest(int alg)
{

	return (alg_type(alg) == ALG_DIGEST ||
	    alg_type(alg) == ALG_KEYED_DIGEST);
}

static bool
alg_is_keyed_digest(int alg)
{

	return (alg_type(alg) == ALG_KEYED_DIGEST);
}

static bool
alg_is_aead(int alg)
{

	return (alg_type(alg) == ALG_AEAD);
}
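
/*
 * The ALG_* classes above drive the per-mode validation in check_csp()
 * below.  For example, CRYPTO_AES_CBC is ALG_CIPHER and so is only
 * valid as the cipher half of a CSP_MODE_CIPHER or CSP_MODE_ETA
 * session, while CRYPTO_AES_NIST_GCM_16 is ALG_AEAD and must be used
 * with CSP_MODE_AEAD.
 */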
static bool
ccm_tag_length_valid(int len)
{
	/* RFC 3610 */
	switch (len) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		return (true);
	default:
		return (false);
	}
}
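
/*
 * Session modes, as validated by check_csp() below:
 *
 *   CSP_MODE_COMPRESS - a compression/decompression algorithm only
 *   CSP_MODE_CIPHER   - a cipher algorithm only
 *   CSP_MODE_DIGEST   - a plain or keyed digest algorithm only
 *   CSP_MODE_AEAD     - a combined cipher+digest algorithm (e.g. AES-GCM)
 *   CSP_MODE_ETA      - encrypt-then-authenticate: a cipher paired
 *                       with a separate digest algorithm
 */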

#define	SUPPORTED_SES	(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)

/* Various sanity checks on crypto session parameters. */
static bool
check_csp(const struct crypto_session_params *csp)
{
	const struct auth_hash *axf;

	/* Mode-independent checks. */
	if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
		return (false);
	if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 ||
	    csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0)
		return (false);
	if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0)
		return (false);
	if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0)
		return (false);

	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		if (!alg_is_compression(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT)
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);
		if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 ||
		    csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
		    csp->csp_auth_mlen != 0)
			return (false);
		break;
	case CSP_MODE_CIPHER:
		if (!alg_is_cipher(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);
		if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
			if (csp->csp_cipher_klen == 0)
				return (false);
			if (csp->csp_ivlen == 0)
				return (false);
		}
		if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
		    csp->csp_auth_mlen != 0)
			return (false);
		break;
	case CSP_MODE_DIGEST:
		if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0)
			return (false);

		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);

		/* IV is optional for digests (e.g. GMAC). */
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_CCM_CBC_MAC:
			if (csp->csp_ivlen < 7 || csp->csp_ivlen > 13)
				return (false);
			break;
		case CRYPTO_AES_NIST_GMAC:
			if (csp->csp_ivlen != AES_GCM_IV_LEN)
				return (false);
			break;
		default:
			if (csp->csp_ivlen != 0)
				return (false);
			break;
		}

		if (!alg_is_digest(csp->csp_auth_alg))
			return (false);

		/* Key is optional for BLAKE2 digests. */
		if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
		    csp->csp_auth_alg == CRYPTO_BLAKE2S)
			;
		else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
			if (csp->csp_auth_klen == 0)
				return (false);
		} else {
			if (csp->csp_auth_klen != 0)
				return (false);
		}
		if (csp->csp_auth_mlen != 0) {
			axf = crypto_auth_hash(csp);
			if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
				return (false);

			if (csp->csp_auth_alg == CRYPTO_AES_CCM_CBC_MAC &&
			    !ccm_tag_length_valid(csp->csp_auth_mlen))
				return (false);
		}
		break;
	case CSP_MODE_AEAD:
		if (!alg_is_aead(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_cipher_klen == 0)
			return (false);
		if (csp->csp_ivlen == 0 ||
		    csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0)
			return (false);

		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_CCM_16:
			if (csp->csp_auth_mlen != 0 &&
			    !ccm_tag_length_valid(csp->csp_auth_mlen))
				return (false);

			if (csp->csp_ivlen < 7 || csp->csp_ivlen > 13)
				return (false);
			break;
		case CRYPTO_AES_NIST_GCM_16:
			if (csp->csp_auth_mlen > AES_GMAC_HASH_LEN)
				return (false);

			if (csp->csp_ivlen != AES_GCM_IV_LEN)
				return (false);
			break;
		case CRYPTO_CHACHA20_POLY1305:
			if (csp->csp_ivlen != 8 && csp->csp_ivlen != 12)
				return (false);
			if (csp->csp_auth_mlen > POLY1305_HASH_LEN)
				return (false);
			break;
		case CRYPTO_XCHACHA20_POLY1305:
			if (csp->csp_ivlen != XCHACHA20_POLY1305_IV_LEN)
				return (false);
			if (csp->csp_auth_mlen > POLY1305_HASH_LEN)
				return (false);
			break;
		}
		break;
	case CSP_MODE_ETA:
		if (!alg_is_cipher(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
			if (csp->csp_cipher_klen == 0)
				return (false);
			if (csp->csp_ivlen == 0)
				return (false);
		}
		if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (!alg_is_digest(csp->csp_auth_alg))
			return (false);

		/* Key is optional for BLAKE2 digests. */
		if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
		    csp->csp_auth_alg == CRYPTO_BLAKE2S)
			;
		else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
			if (csp->csp_auth_klen == 0)
				return (false);
		} else {
			if (csp->csp_auth_klen != 0)
				return (false);
		}
		if (csp->csp_auth_mlen != 0) {
			axf = crypto_auth_hash(csp);
			if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
				return (false);
		}
		break;
	default:
		return (false);
	}

	return (true);
}

/*
 * Delete a session after it has been detached from its driver.
 */
static void
crypto_deletesession(crypto_session_t cses)
{
	struct cryptocap *cap;

	cap = cses->cap;

	zfree(cses, M_CRYPTO_DATA);

	CRYPTO_DRIVER_LOCK();
	cap->cc_sessions--;
	if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP)
		wakeup(cap);
	CRYPTO_DRIVER_UNLOCK();
	cap_rele(cap);
}
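
/*
 * Example (sketch) of the consumer-side flow, assuming an AES-CBC key
 * in 'key'/'klen', a contiguous data buffer, and the crypto_use_buf()
 * helper from cryptodev.h; 'my_callback' is a hypothetical completion
 * routine that inspects crp_etype and releases the request:
 *
 *	struct crypto_session_params csp = {
 *		.csp_mode = CSP_MODE_CIPHER,
 *		.csp_cipher_alg = CRYPTO_AES_CBC,
 *		.csp_cipher_key = key,
 *		.csp_cipher_klen = klen,
 *		.csp_ivlen = 16,
 *	};
 *	crypto_session_t ses;
 *	struct cryptop *crp;
 *
 *	error = crypto_newsession(&ses, &csp,
 *	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
 *	crp = crypto_getreq(ses, M_WAITOK);
 *	crp->crp_op = CRYPTO_OP_ENCRYPT;
 *	crypto_use_buf(crp, buf, buflen);
 *	crp->crp_payload_start = 0;
 *	crp->crp_payload_length = buflen;
 *	crp->crp_callback = my_callback;
 *	error = crypto_dispatch(crp);
 *
 * An IV must also be supplied, either inline in the buffer (via
 * crp_iv_start) or separately with the CRYPTO_F_IV_SEPARATE flag.  The
 * session is torn down with crypto_freesession() once all of its
 * requests have completed.
 */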

/*
 * Create a new session.  The crid argument specifies a crypto
 * driver to use or constraints on a driver to select (hardware
 * only, software only, either).  Whatever driver is selected
 * must be capable of the requested crypto algorithms.
 */
int
crypto_newsession(crypto_session_t *cses,
    const struct crypto_session_params *csp, int crid)
{
	static uint64_t sessid = 0;
	crypto_session_t res;
	struct cryptocap *cap;
	int err;

	if (!check_csp(csp))
		return (EINVAL);

	res = NULL;

	CRYPTO_DRIVER_LOCK();
	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		/*
		 * Use specified driver; verify it is capable.
		 */
		cap = crypto_checkdriver(crid);
		if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0)
			cap = NULL;
	} else {
		/*
		 * No requested driver; select based on crid flags.
		 */
		cap = crypto_select_driver(csp, crid);
	}
	if (cap == NULL) {
		CRYPTO_DRIVER_UNLOCK();
		CRYPTDEB("no driver");
		return (EOPNOTSUPP);
	}
	cap_ref(cap);
	cap->cc_sessions++;
	CRYPTO_DRIVER_UNLOCK();

	/* Allocate a single block for the generic session and driver softc. */
	res = malloc(sizeof(*res) + cap->cc_session_size, M_CRYPTO_DATA,
	    M_WAITOK | M_ZERO);
	res->cap = cap;
	res->csp = *csp;
	res->id = atomic_fetchadd_64(&sessid, 1);

	/* Call the driver initialization routine. */
	err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp);
	if (err != 0) {
		CRYPTDEB("dev newsession failed: %d", err);
		crypto_deletesession(res);
		return (err);
	}

	*cses = res;
	return (0);
}

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
void
crypto_freesession(crypto_session_t cses)
{
	struct cryptocap *cap;

	if (cses == NULL)
		return;

	cap = cses->cap;

	/* Call the driver cleanup routine, if available. */
	CRYPTODEV_FREESESSION(cap->cc_dev, cses);

	crypto_deletesession(cses);
}

/*
 * Return a new driver id.  Registers a driver with the system so that
 * it can be probed by subsequent sessions.
 */
int32_t
crypto_get_driverid(device_t dev, size_t sessionsize, int flags)
{
	struct cryptocap *cap, **newdrv;
	int i;

	if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		device_printf(dev,
		    "no flags specified when registering driver\n");
		return -1;
	}

	cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
	cap->cc_dev = dev;
	cap->cc_session_size = sessionsize;
	cap->cc_flags = flags;
	refcount_init(&cap->cc_refs, 1);

	CRYPTO_DRIVER_LOCK();
	for (;;) {
		for (i = 0; i < crypto_drivers_size; i++) {
			if (crypto_drivers[i] == NULL)
				break;
		}

		if (i < crypto_drivers_size)
			break;

		/* Out of entries, allocate some more. */

		if (2 * crypto_drivers_size <= crypto_drivers_size) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: driver count wraparound!\n");
			cap_rele(cap);
			return (-1);
		}
		CRYPTO_DRIVER_UNLOCK();

		newdrv = malloc(2 * crypto_drivers_size *
		    sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

		CRYPTO_DRIVER_LOCK();
		memcpy(newdrv, crypto_drivers,
		    crypto_drivers_size * sizeof(*crypto_drivers));

		crypto_drivers_size *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	cap->cc_hid = i;
	crypto_drivers[i] = cap;
	CRYPTO_DRIVER_UNLOCK();

	if (bootverbose)
		printf("crypto: assign %s driver id %u, flags 0x%x\n",
		    device_get_nameunit(dev), i, flags);

	return i;
}

/*
 * Lookup a driver by name.  We match against the full device
 * name and unit, and against just the name.  The latter gives
 * us a simple wildcarding by device name.  On success return the
 * driver/hardware identifier; otherwise return -1.
 */
int
crypto_find_driver(const char *match)
{
	struct cryptocap *cap;
	int i, len = strlen(match);

	CRYPTO_DRIVER_LOCK();
	for (i = 0; i < crypto_drivers_size; i++) {
		if (crypto_drivers[i] == NULL)
			continue;
		cap = crypto_drivers[i];
		if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 ||
		    strncmp(match, device_get_name(cap->cc_dev), len) == 0) {
			CRYPTO_DRIVER_UNLOCK();
			return (i);
		}
	}
	CRYPTO_DRIVER_UNLOCK();
	return (-1);
}

/*
 * Return the device_t for the specified driver or NULL
 * if the driver identifier is invalid.
 */
device_t
crypto_find_device_byhid(int hid)
{
	struct cryptocap *cap;
	device_t dev;

	dev = NULL;
	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(hid);
	if (cap != NULL)
		dev = cap->cc_dev;
	CRYPTO_DRIVER_UNLOCK();
	return (dev);
}

/*
 * Return the device/driver capabilities.
 */
int
crypto_getcaps(int hid)
{
	struct cryptocap *cap;
	int flags;

	flags = 0;
	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(hid);
	if (cap != NULL)
		flags = cap->cc_flags;
	CRYPTO_DRIVER_UNLOCK();
	return (flags);
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(uint32_t driverid)
{
	struct cryptocap *cap;

	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap == NULL) {
		CRYPTO_DRIVER_UNLOCK();
		return (EINVAL);
	}

	cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
	crypto_drivers[driverid] = NULL;

	/*
	 * XXX: This doesn't do anything to kick sessions that
	 * have no pending operations.
	 */
	while (cap->cc_sessions != 0)
		mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0);
	CRYPTO_DRIVER_UNLOCK();
	cap_rele(cap);

	return (0);
}
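
/*
 * Driver flow control: when CRYPTODEV_PROCESS() returns ERESTART, the
 * driver is out of resources and its cc_qblocked flag is set; queued
 * requests are then held on crp_q instead of being handed to the
 * driver.  Once resources become available again, the driver calls
 * crypto_unblock() below with CRYPTO_SYMQ to clear the flag and wake
 * the dispatch thread.
 */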

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(uint32_t driverid, int what)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_Q_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		if (what & CRYPTO_SYMQ)
			cap->cc_qblocked = 0;
		if (crp_sleep)
			wakeup_one(&crp_q);
		err = 0;
	} else
		err = EINVAL;
	CRYPTO_Q_UNLOCK();

	return err;
}

size_t
crypto_buffer_len(struct crypto_buffer *cb)
{
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		return (cb->cb_buf_len);
	case CRYPTO_BUF_MBUF:
		if (cb->cb_mbuf->m_flags & M_PKTHDR)
			return (cb->cb_mbuf->m_pkthdr.len);
		return (m_length(cb->cb_mbuf, NULL));
	case CRYPTO_BUF_SINGLE_MBUF:
		return (cb->cb_mbuf->m_len);
	case CRYPTO_BUF_VMPAGE:
		return (cb->cb_vm_page_len);
	case CRYPTO_BUF_UIO:
		return (cb->cb_uio->uio_resid);
	default:
		return (0);
	}
}
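
/*
 * Request data may live in several kinds of buffers: a contiguous
 * kernel buffer (CRYPTO_BUF_CONTIG), an mbuf chain or a single mbuf
 * (CRYPTO_BUF_MBUF, CRYPTO_BUF_SINGLE_MBUF), an array of vm_page_t
 * (CRYPTO_BUF_VMPAGE, only on direct-map platforms), or a struct uio
 * (CRYPTO_BUF_UIO).  A sketch, assuming the crypto_use_buf() helper
 * from cryptodev.h, for attaching a flat buffer to a request:
 *
 *	crypto_use_buf(crp, data, datalen);
 *
 * Analogous crypto_use_*() helpers exist in cryptodev.h for the other
 * buffer types.
 */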

#ifdef INVARIANTS
/* Various sanity checks on crypto requests. */
static void
cb_sanity(struct crypto_buffer *cb, const char *name)
{
	KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST,
	    ("incoming crp with invalid %s buffer type", name));
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		KASSERT(cb->cb_buf_len >= 0,
		    ("incoming crp with -ve %s buffer length", name));
		break;
	case CRYPTO_BUF_VMPAGE:
		KASSERT(CRYPTO_HAS_VMPAGE,
		    ("incoming crp uses dmap on unsupported arch"));
		KASSERT(cb->cb_vm_page_len >= 0,
		    ("incoming crp with -ve %s buffer length", name));
		KASSERT(cb->cb_vm_page_offset >= 0,
		    ("incoming crp with -ve %s buffer offset", name));
		KASSERT(cb->cb_vm_page_offset < PAGE_SIZE,
		    ("incoming crp with %s buffer offset greater than page size",
		    name));
		break;
	default:
		break;
	}
}

static void
crp_sanity(struct cryptop *crp)
{
	struct crypto_session_params *csp;
	struct crypto_buffer *out;
	size_t ilen, len, olen;

	KASSERT(crp->crp_session != NULL, ("incoming crp without a session"));
	KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE &&
	    crp->crp_obuf.cb_type <= CRYPTO_BUF_LAST,
	    ("incoming crp with invalid output buffer type"));
	KASSERT(crp->crp_etype == 0, ("incoming crp with error"));
	KASSERT(!(crp->crp_flags & CRYPTO_F_DONE),
	    ("incoming crp already done"));

	csp = &crp->crp_session->csp;
	cb_sanity(&crp->crp_buf, "input");
	ilen = crypto_buffer_len(&crp->crp_buf);
	olen = ilen;
	out = NULL;
	if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) {
		if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) {
			cb_sanity(&crp->crp_obuf, "output");
			out = &crp->crp_obuf;
			olen = crypto_buffer_len(out);
		}
	} else
		KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE,
		    ("incoming crp with separate output buffer "
		    "but no session support"));

	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS ||
		    crp->crp_op == CRYPTO_OP_DECOMPRESS,
		    ("invalid compression op %x", crp->crp_op));
		break;
	case CSP_MODE_CIPHER:
		KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT ||
		    crp->crp_op == CRYPTO_OP_DECRYPT,
		    ("invalid cipher op %x", crp->crp_op));
		break;
	case CSP_MODE_DIGEST:
		KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST ||
		    crp->crp_op == CRYPTO_OP_VERIFY_DIGEST,
		    ("invalid digest op %x", crp->crp_op));
		break;
	case CSP_MODE_AEAD:
		KASSERT(crp->crp_op ==
		    (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
		    crp->crp_op ==
		    (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
		    ("invalid AEAD op %x", crp->crp_op));
		KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE,
		    ("AEAD without a separate IV"));
		break;
	case CSP_MODE_ETA:
		KASSERT(crp->crp_op ==
		    (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
		    crp->crp_op ==
		    (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
		    ("invalid ETA op %x", crp->crp_op));
		break;
	}
	if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
		if (crp->crp_aad == NULL) {
			KASSERT(crp->crp_aad_start == 0 ||
			    crp->crp_aad_start < ilen,
			    ("invalid AAD start"));
			KASSERT(crp->crp_aad_length != 0 ||
			    crp->crp_aad_start == 0,
			    ("AAD with zero length and non-zero start"));
			KASSERT(crp->crp_aad_length == 0 ||
			    crp->crp_aad_start + crp->crp_aad_length <= ilen,
			    ("AAD outside input length"));
		} else {
			KASSERT(csp->csp_flags & CSP_F_SEPARATE_AAD,
			    ("session doesn't support separate AAD buffer"));
			KASSERT(crp->crp_aad_start == 0,
			    ("separate AAD buffer with non-zero AAD start"));
			KASSERT(crp->crp_aad_length != 0,
			    ("separate AAD buffer with zero length"));
		}
	} else {
		KASSERT(crp->crp_aad == NULL && crp->crp_aad_start == 0 &&
		    crp->crp_aad_length == 0,
		    ("AAD region in request not supporting AAD"));
	}
	if (csp->csp_ivlen == 0) {
		KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0,
		    ("IV_SEPARATE set when IV isn't used"));
		KASSERT(crp->crp_iv_start == 0,
		    ("crp_iv_start set when IV isn't used"));
	} else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
		KASSERT(crp->crp_iv_start == 0,
		    ("IV_SEPARATE used with non-zero IV start"));
	} else {
		KASSERT(crp->crp_iv_start < ilen,
		    ("invalid IV start"));
		KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen,
		    ("IV outside buffer length"));
	}
	/* XXX: payload_start of 0 should always be < ilen? */
	KASSERT(crp->crp_payload_start == 0 ||
	    crp->crp_payload_start < ilen,
	    ("invalid payload start"));
	KASSERT(crp->crp_payload_start + crp->crp_payload_length <=
	    ilen, ("payload outside input buffer"));
	if (out == NULL) {
		KASSERT(crp->crp_payload_output_start == 0,
		    ("payload output start non-zero without output buffer"));
	} else if (csp->csp_mode == CSP_MODE_DIGEST) {
		KASSERT(!(crp->crp_op & CRYPTO_OP_VERIFY_DIGEST),
		    ("digest verify with separate output buffer"));
		KASSERT(crp->crp_payload_output_start == 0,
		    ("digest operation with non-zero payload output start"));
	} else {
		KASSERT(crp->crp_payload_output_start == 0 ||
		    crp->crp_payload_output_start < olen,
		    ("invalid payload output start"));
		KASSERT(crp->crp_payload_output_start +
		    crp->crp_payload_length <= olen,
		    ("payload outside output buffer"));
	}
	if (csp->csp_mode == CSP_MODE_DIGEST ||
	    csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
		if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST)
			len = ilen;
		else
			len = olen;
		KASSERT(crp->crp_digest_start == 0 ||
		    crp->crp_digest_start < len,
		    ("invalid digest start"));
		/* XXX: For the mlen == 0 case this check isn't perfect. */
		KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len,
		    ("digest outside buffer"));
	} else {
		KASSERT(crp->crp_digest_start == 0,
		    ("non-zero digest start for request without a digest"));
	}
	if (csp->csp_cipher_klen != 0)
		KASSERT(csp->csp_cipher_key != NULL ||
		    crp->crp_cipher_key != NULL,
		    ("cipher request without a key"));
	if (csp->csp_auth_klen != 0)
		KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL,
		    ("auth request without a key"));
	KASSERT(crp->crp_callback != NULL, ("incoming crp without callback"));
}
#endif

static int
crypto_dispatch_one(struct cryptop *crp, int hint)
{
	struct cryptocap *cap;
	int result;

#ifdef INVARIANTS
	crp_sanity(crp);
#endif
	CRYPTOSTAT_INC(cs_ops);

	crp->crp_retw_id = crp->crp_session->id % crypto_workers_num;

	/*
	 * Caller marked the request to be processed immediately; dispatch it
	 * directly to the driver unless the driver is currently blocked, in
	 * which case it is queued for deferred dispatch.
	 */
	cap = crp->crp_session->cap;
	if (!atomic_load_int(&cap->cc_qblocked)) {
		result = crypto_invoke(cap, crp, hint);
		if (result != ERESTART)
			return (result);

		/*
		 * The driver ran out of resources, put the request on the
		 * queue.
		 */
	}
	crypto_batch_enqueue(crp);
	return (0);
}

int
crypto_dispatch(struct cryptop *crp)
{
	return (crypto_dispatch_one(crp, 0));
}

int
crypto_dispatch_async(struct cryptop *crp, int flags)
{
	struct crypto_ret_worker *ret_worker;

	if (!CRYPTO_SESS_SYNC(crp->crp_session)) {
		/*
		 * The driver issues completions asynchronously, don't bother
		 * deferring dispatch to a worker thread.
		 */
		return (crypto_dispatch(crp));
	}

#ifdef INVARIANTS
	crp_sanity(crp);
#endif
	CRYPTOSTAT_INC(cs_ops);

	crp->crp_retw_id = crp->crp_session->id % crypto_workers_num;
	if ((flags & CRYPTO_ASYNC_ORDERED) != 0) {
		crp->crp_flags |= CRYPTO_F_ASYNC_ORDERED;
		ret_worker = CRYPTO_RETW(crp->crp_retw_id);
		CRYPTO_RETW_LOCK(ret_worker);
		crp->crp_seq = ret_worker->reorder_ops++;
		CRYPTO_RETW_UNLOCK(ret_worker);
	}
	TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp);
	taskqueue_enqueue(crypto_tq, &crp->crp_task);
	return (0);
}

void
crypto_dispatch_batch(struct cryptopq *crpq, int flags)
{
	struct cryptop *crp;
	int hint;

	while ((crp = TAILQ_FIRST(crpq)) != NULL) {
		hint = TAILQ_NEXT(crp, crp_next) != NULL ? CRYPTO_HINT_MORE : 0;
		TAILQ_REMOVE(crpq, crp, crp_next);
		if (crypto_dispatch_one(crp, hint) != 0)
			crypto_batch_enqueue(crp);
	}
}

static void
crypto_batch_enqueue(struct cryptop *crp)
{

	CRYPTO_Q_LOCK();
	TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
	if (crp_sleep)
		wakeup_one(&crp_q);
	CRYPTO_Q_UNLOCK();
}

static void
crypto_task_invoke(void *ctx, int pending)
{
	struct cryptocap *cap;
	struct cryptop *crp;
	int result;

	crp = (struct cryptop *)ctx;
	cap = crp->crp_session->cap;
	result = crypto_invoke(cap, crp, 0);
	if (result == ERESTART)
		crypto_batch_enqueue(crp);
}
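
/*
 * There are three submission paths: crypto_dispatch() invokes the
 * driver synchronously from the caller's context, falling back to the
 * crp_q queue if the driver is blocked; crypto_dispatch_async() defers
 * software (synchronous) sessions to the crypto taskqueue, optionally
 * preserving completion order via CRYPTO_ASYNC_ORDERED; and
 * crypto_dispatch_batch() submits a whole queue of requests, using
 * CRYPTO_HINT_MORE to tell the driver that more work is coming.
 */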

/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
{
	int error;

	KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
	KASSERT(crp->crp_callback != NULL,
	    ("%s: crp->crp_callback == NULL", __func__));
	KASSERT(crp->crp_session != NULL,
	    ("%s: crp->crp_session == NULL", __func__));

	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
		struct crypto_session_params csp;
		crypto_session_t nses;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 *
		 * XXX: What if there are more already queued requests for this
		 * session?
		 *
		 * XXX: Real solution is to make sessions refcounted
		 * and force callers to hold a reference when
		 * assigning to crp_session.  Could maybe change
		 * crypto_getreq to accept a session pointer to make
		 * that work.  Alternatively, we could abandon the
		 * notion of rewriting crp_session in requests forcing
		 * the caller to deal with allocating a new session.
		 * Perhaps provide a method to allow a crp's session to
		 * be swapped that callers could use.
		 */
		csp = crp->crp_session->csp;
		crypto_freesession(crp->crp_session);

		/*
		 * XXX: Key pointers may no longer be valid.  If we
		 * really want to support this we need to define the
		 * KPI such that 'csp' is required to be valid for the
		 * duration of a session by the caller perhaps.
		 *
		 * XXX: If the keys have been changed this will reuse
		 * the old keys.  This probably suggests making
		 * rekeying more explicit and updating the key
		 * pointers in 'csp' when the keys change.
		 */
		if (crypto_newsession(&nses, &csp,
		    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
			crp->crp_session = nses;

		crp->crp_etype = EAGAIN;
		crypto_done(crp);
		error = 0;
	} else {
		/*
		 * Invoke the driver to process the request.  Errors are
		 * signaled by setting crp_etype before invoking the completion
		 * callback.
		 */
		error = CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
		KASSERT(error == 0 || error == ERESTART,
		    ("%s: invalid error %d from CRYPTODEV_PROCESS",
		    __func__, error));
	}
	return (error);
}

void
crypto_destroyreq(struct cryptop *crp)
{
#ifdef DIAGNOSTIC
	{
		struct cryptop *crp2;
		struct crypto_ret_worker *ret_worker;

		if (!crypto_destroyreq_check)
			return;

		CRYPTO_Q_LOCK();
		TAILQ_FOREACH(crp2, &crp_q, crp_next) {
			KASSERT(crp2 != crp,
			    ("Freeing cryptop from the crypto queue (%p).",
			    crp));
		}
		CRYPTO_Q_UNLOCK();

		FOREACH_CRYPTO_RETW(ret_worker) {
			CRYPTO_RETW_LOCK(ret_worker);
			TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) {
				KASSERT(crp2 != crp,
				    ("Freeing cryptop from the return queue (%p).",
				    crp));
			}
			CRYPTO_RETW_UNLOCK(ret_worker);
		}
	}
#endif
}

void
crypto_freereq(struct cryptop *crp)
{
	if (crp == NULL)
		return;

	crypto_destroyreq(crp);
	uma_zfree(cryptop_zone, crp);
}

void
crypto_initreq(struct cryptop *crp, crypto_session_t cses)
{
	memset(crp, 0, sizeof(*crp));
	crp->crp_session = cses;
}

struct cryptop *
crypto_getreq(crypto_session_t cses, int how)
{
	struct cryptop *crp;

	MPASS(how == M_WAITOK || how == M_NOWAIT);
	crp = uma_zalloc(cryptop_zone, how);
	if (crp != NULL)
		crypto_initreq(crp, cses);
	return (crp);
}

/*
 * Clone a crypto request, but associate it with the specified session
 * rather than inheriting the session from the original request.  The
 * fields describing the request buffers are copied, but not the
 * opaque field or callback function.
 */
struct cryptop *
crypto_clonereq(struct cryptop *crp, crypto_session_t cses, int how)
{
	struct cryptop *new;

	MPASS((crp->crp_flags & CRYPTO_F_DONE) == 0);
	new = crypto_getreq(cses, how);
	if (new == NULL)
		return (NULL);

	memcpy(&new->crp_startcopy, &crp->crp_startcopy,
	    __rangeof(struct cryptop, crp_startcopy, crp_endcopy));
	return (new);
}
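
/*
 * Request lifecycle note: crypto_getreq()/crypto_freereq() manage
 * requests backed by the cryptop UMA zone, while the
 * crypto_initreq()/crypto_destroyreq() pair lets a caller provide its
 * own cryptop storage (e.g. embedded in a larger per-request
 * structure).  Either way, a request must not be released until its
 * completion callback has run.
 */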

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
	KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
	    ("crypto_done: op already done, flags 0x%x", crp->crp_flags));
	crp->crp_flags |= CRYPTO_F_DONE;
	if (crp->crp_etype != 0)
		CRYPTOSTAT_INC(cs_errs);

	/*
	 * CBIMM means unconditionally do the callback immediately;
	 * CBIFSYNC means do the callback immediately only if the
	 * operation was done synchronously.  Both are used to avoid
	 * doing extraneous context switches; the latter is mostly
	 * used with the software crypto driver.
	 */
	if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) == 0 &&
	    ((crp->crp_flags & CRYPTO_F_CBIMM) != 0 ||
	    ((crp->crp_flags & CRYPTO_F_CBIFSYNC) != 0 &&
	    CRYPTO_SESS_SYNC(crp->crp_session)))) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
		crp->crp_callback(crp);
	} else {
		struct crypto_ret_worker *ret_worker;
		bool wake;

		ret_worker = CRYPTO_RETW(crp->crp_retw_id);

		/*
		 * Normal case; queue the callback for the thread.
		 */
		CRYPTO_RETW_LOCK(ret_worker);
		if ((crp->crp_flags & CRYPTO_F_ASYNC_ORDERED) != 0) {
			struct cryptop *tmp;

			TAILQ_FOREACH_REVERSE(tmp,
			    &ret_worker->crp_ordered_ret_q, cryptop_q,
			    crp_next) {
				if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) {
					TAILQ_INSERT_AFTER(
					    &ret_worker->crp_ordered_ret_q, tmp,
					    crp, crp_next);
					break;
				}
			}
			if (tmp == NULL) {
				TAILQ_INSERT_HEAD(
				    &ret_worker->crp_ordered_ret_q, crp,
				    crp_next);
			}

			wake = crp->crp_seq == ret_worker->reorder_cur_seq;
		} else {
			wake = TAILQ_EMPTY(&ret_worker->crp_ret_q);
			TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp,
			    crp_next);
		}

		if (wake)
			wakeup_one(&ret_worker->crp_ret_q);	/* shared wait channel */
		CRYPTO_RETW_UNLOCK(ret_worker);
	}
}

/*
 * Terminate a thread at module unload.  The process that
 * initiated this is waiting for us to signal that we're gone;
 * wake it up and exit.  We use the driver table lock to ensure
 * we don't do the wakeup before they're waiting.  There is no
 * race here because the waiter sleeps on the proc lock for the
 * thread so it gets notified at the right time because of an
 * extra wakeup that's done in exit1().
 */
static void
crypto_finis(void *chan)
{
	CRYPTO_DRIVER_LOCK();
	wakeup_one(chan);
	CRYPTO_DRIVER_UNLOCK();
	kthread_exit();
}
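
/*
 * Ordered completions: requests dispatched with CRYPTO_ASYNC_ORDERED
 * are stamped with crp_seq and inserted into crp_ordered_ret_q sorted
 * by CRYPTO_SEQ_GT() in crypto_done() above; the return thread only
 * delivers the head entry once its crp_seq matches reorder_cur_seq,
 * then advances the counter.  CRYPTO_SEQ_GT() compares sequence
 * numbers modulo 2^32, so e.g. CRYPTO_SEQ_GT(0, UINT32_MAX) is true
 * across the wrap.
 */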

/*
 * Crypto thread, dispatches crypto requests.
 */
static void
crypto_dispatch_thread(void *arg __unused)
{
	struct cryptop *crp, *submit;
	struct cryptocap *cap;
	int result, hint;

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
	fpu_kern_thread(FPU_KERN_NORMAL);
#endif

	CRYPTO_Q_LOCK();
	for (;;) {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH(crp, &crp_q, crp_next) {
			cap = crp->crp_session->cap;
			/*
			 * A driver cannot disappear when there is an active
			 * session.
			 */
			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
			    __func__, __LINE__));
			if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
				/* Op needs to be migrated, process it. */
				if (submit == NULL)
					submit = crp;
				break;
			}
			if (!cap->cc_qblocked) {
				if (submit != NULL) {
					/*
					 * We stop on finding another op,
					 * regardless of whether it's for the
					 * same driver or not.  We could keep
					 * searching the queue but it might be
					 * better to just use a per-driver
					 * queue instead.
					 */
					if (submit->crp_session->cap == cap)
						hint = CRYPTO_HINT_MORE;
					break;
				} else {
					submit = crp;
				}
			}
		}
		if (submit != NULL) {
			TAILQ_REMOVE(&crp_q, submit, crp_next);
			cap = submit->crp_session->cap;
			KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
			    __func__, __LINE__));
			CRYPTO_Q_UNLOCK();
			result = crypto_invoke(cap, submit, hint);
			CRYPTO_Q_LOCK();
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				cap->cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
				CRYPTOSTAT_INC(cs_blocks);
			}
		} else {
			/*
			 * Nothing more to be processed.  Sleep until we're
			 * woken because there are more ops to process.
			 * This happens either by submission or by a driver
			 * becoming unblocked and notifying us through
			 * crypto_unblock.  Note that when we wakeup we
			 * start processing each queue again from the
			 * front.  It's not clear that it's important to
			 * preserve this ordering since ops may finish
			 * out of order if dispatched to different devices
			 * and some become blocked while others do not.
			 */
			crp_sleep = 1;
			msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
			crp_sleep = 0;
			if (cryptotd == NULL)
				break;
			CRYPTOSTAT_INC(cs_intrs);
		}
	}
	CRYPTO_Q_UNLOCK();

	crypto_finis(&crp_q);
}
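
/*
 * The look-ahead above only inspects the op immediately following the
 * chosen one; when both belong to the same driver, CRYPTO_HINT_MORE
 * tells the driver that another request for it is already queued, so
 * the driver may, for example, batch submissions to its hardware.
 */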

/*
 * Crypto returns thread, does callbacks for processed crypto requests.
 * Callbacks are done here, rather than in the crypto drivers, because
 * callbacks typically are expensive and would slow interrupt handling.
 */
static void
crypto_ret_thread(void *arg)
{
	struct crypto_ret_worker *ret_worker = arg;
	struct cryptop *crpt;

	CRYPTO_RETW_LOCK(ret_worker);
	for (;;) {
		/* Harvest return q's for completed ops */
		crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q);
		if (crpt != NULL) {
			if (crpt->crp_seq == ret_worker->reorder_cur_seq) {
				TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q,
				    crpt, crp_next);
				ret_worker->reorder_cur_seq++;
			} else {
				crpt = NULL;
			}
		}

		if (crpt == NULL) {
			crpt = TAILQ_FIRST(&ret_worker->crp_ret_q);
			if (crpt != NULL)
				TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt,
				    crp_next);
		}

		if (crpt != NULL) {
			CRYPTO_RETW_UNLOCK(ret_worker);
			/*
			 * Run callbacks unlocked.
			 */
			crpt->crp_callback(crpt);
			CRYPTO_RETW_LOCK(ret_worker);
		} else {
			/*
			 * Nothing more to be processed.  Sleep until we're
			 * woken because there are more returns to process.
			 */
			msleep(&ret_worker->crp_ret_q,
			    &ret_worker->crypto_ret_mtx, PWAIT,
			    "crypto_ret_wait", 0);
			if (ret_worker->td == NULL)
				break;
			CRYPTOSTAT_INC(cs_rets);
		}
	}
	CRYPTO_RETW_UNLOCK(ret_worker);

	crypto_finis(&ret_worker->crp_ret_q);
}

#ifdef DDB
static void
db_show_drivers(void)
{
	int hid;

	db_printf("%12s %4s %8s %2s\n"
	    , "Device"
	    , "Ses"
	    , "Flags"
	    , "QB"
	);
	for (hid = 0; hid < crypto_drivers_size; hid++) {
		const struct cryptocap *cap = crypto_drivers[hid];
		if (cap == NULL)
			continue;
		db_printf("%-12s %4u %08x %2u\n"
		    , device_get_nameunit(cap->cc_dev)
		    , cap->cc_sessions
		    , cap->cc_flags
		    , cap->cc_qblocked
		);
	}
}

DB_SHOW_COMMAND_FLAGS(crypto, db_show_crypto, DB_CMD_MEMSAFE)
{
	struct cryptop *crp;
	struct crypto_ret_worker *ret_worker;

	db_show_drivers();
	db_printf("\n");

	db_printf("%4s %8s %4s %4s %4s %8s %8s\n",
	    "HID", "Caps", "Olen", "Etype", "Flags",
	    "Device", "Callback");
	TAILQ_FOREACH(crp, &crp_q, crp_next) {
		db_printf("%4u %08x %4u %4u %04x %8p %8p\n"
		    , crp->crp_session->cap->cc_hid
		    , (int) crypto_ses2caps(crp->crp_session)
		    , crp->crp_olen
		    , crp->crp_etype
		    , crp->crp_flags
		    , device_get_nameunit(crp->crp_session->cap->cc_dev)
		    , crp->crp_callback
		);
	}
	FOREACH_CRYPTO_RETW(ret_worker) {
		db_printf("\n%8s %4s %4s %4s %8s\n",
		    "ret_worker", "HID", "Etype", "Flags", "Callback");
		if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) {
			TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) {
				db_printf("%8td %4u %4u %04x %8p\n"
				    , CRYPTO_RETW_ID(ret_worker)
				    , crp->crp_session->cap->cc_hid
				    , crp->crp_etype
				    , crp->crp_flags
				    , crp->crp_callback
				);
			}
		}
	}
}
#endif

int crypto_modevent(module_t mod, int type, void *unused);

/*
 * Initialization code, both for static and dynamic loading.
 * Note this is not invoked with the usual MODULE_DECLARE
 * mechanism but instead is listed as a dependency by the
 * cryptosoft driver.  This guarantees proper ordering of
 * calls on module load/unload.
 */
int
crypto_modevent(module_t mod, int type, void *unused)
{
	int error = EINVAL;

	switch (type) {
	case MOD_LOAD:
		error = crypto_init();
		if (error == 0 && bootverbose)
			printf("crypto: <crypto core>\n");
		break;
	case MOD_UNLOAD:
		/*XXX disallow if active sessions */
		error = 0;
		crypto_destroy();
		return 0;
	}
	return error;
}
MODULE_VERSION(crypto, 1);
MODULE_DEPEND(crypto, zlib, 1, 1, 1);