1 /*- 2 * Copyright (c) 2002-2006 Sam Leffler. All rights reserved. 3 * 4 * Redistribution and use in source and binary forms, with or without 5 * modification, are permitted provided that the following conditions 6 * are met: 7 * 1. Redistributions of source code must retain the above copyright 8 * notice, this list of conditions and the following disclaimer. 9 * 2. Redistributions in binary form must reproduce the above copyright 10 * notice, this list of conditions and the following disclaimer in the 11 * documentation and/or other materials provided with the distribution. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 14 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 15 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 16 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 18 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 19 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 20 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 22 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 */ 24 25 #include <sys/cdefs.h> 26 __FBSDID("$FreeBSD$"); 27 28 /* 29 * Cryptographic Subsystem. 30 * 31 * This code is derived from the Openbsd Cryptographic Framework (OCF) 32 * that has the copyright shown below. Very little of the original 33 * code remains. 34 */ 35 36 /*- 37 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) 38 * 39 * This code was written by Angelos D. Keromytis in Athens, Greece, in 40 * February 2000. Network Security Technologies Inc. (NSTI) kindly 41 * supported the development of this code. 42 * 43 * Copyright (c) 2000, 2001 Angelos D. Keromytis 44 * 45 * Permission to use, copy, and modify this software with or without fee 46 * is hereby granted, provided that this entire notice is included in 47 * all source code copies of any software which is or includes a copy or 48 * modification of this software. 49 * 50 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR 51 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY 52 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE 53 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR 54 * PURPOSE. 55 */ 56 57 #include "opt_compat.h" 58 #include "opt_ddb.h" 59 60 #include <sys/param.h> 61 #include <sys/systm.h> 62 #include <sys/counter.h> 63 #include <sys/kernel.h> 64 #include <sys/kthread.h> 65 #include <sys/linker.h> 66 #include <sys/lock.h> 67 #include <sys/module.h> 68 #include <sys/mutex.h> 69 #include <sys/malloc.h> 70 #include <sys/mbuf.h> 71 #include <sys/proc.h> 72 #include <sys/refcount.h> 73 #include <sys/sdt.h> 74 #include <sys/smp.h> 75 #include <sys/sysctl.h> 76 #include <sys/taskqueue.h> 77 #include <sys/uio.h> 78 79 #include <ddb/ddb.h> 80 81 #include <machine/vmparam.h> 82 #include <vm/uma.h> 83 84 #include <crypto/intake.h> 85 #include <opencrypto/cryptodev.h> 86 #include <opencrypto/xform_auth.h> 87 #include <opencrypto/xform_enc.h> 88 89 #include <sys/kobj.h> 90 #include <sys/bus.h> 91 #include "cryptodev_if.h" 92 93 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 94 #include <machine/pcb.h> 95 #endif 96 97 SDT_PROVIDER_DEFINE(opencrypto); 98 99 /* 100 * Crypto drivers register themselves by allocating a slot in the 101 * crypto_drivers table with crypto_get_driverid() and then registering 102 * each asym algorithm they support with crypto_kregister(). 103 */ 104 static struct mtx crypto_drivers_mtx; /* lock on driver table */ 105 #define CRYPTO_DRIVER_LOCK() mtx_lock(&crypto_drivers_mtx) 106 #define CRYPTO_DRIVER_UNLOCK() mtx_unlock(&crypto_drivers_mtx) 107 #define CRYPTO_DRIVER_ASSERT() mtx_assert(&crypto_drivers_mtx, MA_OWNED) 108 109 /* 110 * Crypto device/driver capabilities structure. 111 * 112 * Synchronization: 113 * (d) - protected by CRYPTO_DRIVER_LOCK() 114 * (q) - protected by CRYPTO_Q_LOCK() 115 * Not tagged fields are read-only. 116 */ 117 struct cryptocap { 118 device_t cc_dev; 119 uint32_t cc_hid; 120 u_int32_t cc_sessions; /* (d) # of sessions */ 121 u_int32_t cc_koperations; /* (d) # os asym operations */ 122 u_int8_t cc_kalg[CRK_ALGORITHM_MAX + 1]; 123 124 int cc_flags; /* (d) flags */ 125 #define CRYPTOCAP_F_CLEANUP 0x80000000 /* needs resource cleanup */ 126 int cc_qblocked; /* (q) symmetric q blocked */ 127 int cc_kqblocked; /* (q) asymmetric q blocked */ 128 size_t cc_session_size; 129 volatile int cc_refs; 130 }; 131 132 static struct cryptocap **crypto_drivers = NULL; 133 static int crypto_drivers_size = 0; 134 135 struct crypto_session { 136 struct cryptocap *cap; 137 void *softc; 138 struct crypto_session_params csp; 139 }; 140 141 /* 142 * There are two queues for crypto requests; one for symmetric (e.g. 143 * cipher) operations and one for asymmetric (e.g. MOD)operations. 144 * A single mutex is used to lock access to both queues. We could 145 * have one per-queue but having one simplifies handling of block/unblock 146 * operations. 147 */ 148 static int crp_sleep = 0; 149 static TAILQ_HEAD(cryptop_q ,cryptop) crp_q; /* request queues */ 150 static TAILQ_HEAD(,cryptkop) crp_kq; 151 static struct mtx crypto_q_mtx; 152 #define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx) 153 #define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx) 154 155 SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0, 156 "In-kernel cryptography"); 157 158 /* 159 * Taskqueue used to dispatch the crypto requests 160 * that have the CRYPTO_F_ASYNC flag 161 */ 162 static struct taskqueue *crypto_tq; 163 164 /* 165 * Crypto seq numbers are operated on with modular arithmetic 166 */ 167 #define CRYPTO_SEQ_GT(a,b) ((int)((a)-(b)) > 0) 168 169 struct crypto_ret_worker { 170 struct mtx crypto_ret_mtx; 171 172 TAILQ_HEAD(,cryptop) crp_ordered_ret_q; /* ordered callback queue for symetric jobs */ 173 TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queue for symetric jobs */ 174 TAILQ_HEAD(,cryptkop) crp_ret_kq; /* callback queue for asym jobs */ 175 176 u_int32_t reorder_ops; /* total ordered sym jobs received */ 177 u_int32_t reorder_cur_seq; /* current sym job dispatched */ 178 179 struct proc *cryptoretproc; 180 }; 181 static struct crypto_ret_worker *crypto_ret_workers = NULL; 182 183 #define CRYPTO_RETW(i) (&crypto_ret_workers[i]) 184 #define CRYPTO_RETW_ID(w) ((w) - crypto_ret_workers) 185 #define FOREACH_CRYPTO_RETW(w) \ 186 for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w) 187 188 #define CRYPTO_RETW_LOCK(w) mtx_lock(&w->crypto_ret_mtx) 189 #define CRYPTO_RETW_UNLOCK(w) mtx_unlock(&w->crypto_ret_mtx) 190 #define CRYPTO_RETW_EMPTY(w) \ 191 (TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq) && TAILQ_EMPTY(&w->crp_ordered_ret_q)) 192 193 static int crypto_workers_num = 0; 194 SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN, 195 &crypto_workers_num, 0, 196 "Number of crypto workers used to dispatch crypto jobs"); 197 #ifdef COMPAT_FREEBSD12 198 SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN, 199 &crypto_workers_num, 0, 200 "Number of crypto workers used to dispatch crypto jobs"); 201 #endif 202 203 static uma_zone_t cryptop_zone; 204 static uma_zone_t cryptoses_zone; 205 206 int crypto_userasymcrypto = 1; 207 SYSCTL_INT(_kern_crypto, OID_AUTO, asym_enable, CTLFLAG_RW, 208 &crypto_userasymcrypto, 0, 209 "Enable user-mode access to asymmetric crypto support"); 210 #ifdef COMPAT_FREEBSD12 211 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW, 212 &crypto_userasymcrypto, 0, 213 "Enable/disable user-mode access to asymmetric crypto support"); 214 #endif 215 216 int crypto_devallowsoft = 0; 217 SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RW, 218 &crypto_devallowsoft, 0, 219 "Enable use of software crypto by /dev/crypto"); 220 #ifdef COMPAT_FREEBSD12 221 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW, 222 &crypto_devallowsoft, 0, 223 "Enable/disable use of software crypto by /dev/crypto"); 224 #endif 225 226 MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records"); 227 228 static void crypto_proc(void); 229 static struct proc *cryptoproc; 230 static void crypto_ret_proc(struct crypto_ret_worker *ret_worker); 231 static void crypto_destroy(void); 232 static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint); 233 static int crypto_kinvoke(struct cryptkop *krp); 234 static void crypto_task_invoke(void *ctx, int pending); 235 static void crypto_batch_enqueue(struct cryptop *crp); 236 237 static counter_u64_t cryptostats[sizeof(struct cryptostats) / sizeof(uint64_t)]; 238 SYSCTL_COUNTER_U64_ARRAY(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW, 239 cryptostats, nitems(cryptostats), 240 "Crypto system statistics"); 241 242 #define CRYPTOSTAT_INC(stat) do { \ 243 counter_u64_add( \ 244 cryptostats[offsetof(struct cryptostats, stat) / sizeof(uint64_t)],\ 245 1); \ 246 } while (0) 247 248 static void 249 cryptostats_init(void *arg __unused) 250 { 251 COUNTER_ARRAY_ALLOC(cryptostats, nitems(cryptostats), M_WAITOK); 252 } 253 SYSINIT(cryptostats_init, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_init, NULL); 254 255 static void 256 cryptostats_fini(void *arg __unused) 257 { 258 COUNTER_ARRAY_FREE(cryptostats, nitems(cryptostats)); 259 } 260 SYSUNINIT(cryptostats_fini, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_fini, 261 NULL); 262 263 /* Try to avoid directly exposing the key buffer as a symbol */ 264 static struct keybuf *keybuf; 265 266 static struct keybuf empty_keybuf = { 267 .kb_nents = 0 268 }; 269 270 /* Obtain the key buffer from boot metadata */ 271 static void 272 keybuf_init(void) 273 { 274 caddr_t kmdp; 275 276 kmdp = preload_search_by_type("elf kernel"); 277 278 if (kmdp == NULL) 279 kmdp = preload_search_by_type("elf64 kernel"); 280 281 keybuf = (struct keybuf *)preload_search_info(kmdp, 282 MODINFO_METADATA | MODINFOMD_KEYBUF); 283 284 if (keybuf == NULL) 285 keybuf = &empty_keybuf; 286 } 287 288 /* It'd be nice if we could store these in some kind of secure memory... */ 289 struct keybuf * get_keybuf(void) { 290 291 return (keybuf); 292 } 293 294 static struct cryptocap * 295 cap_ref(struct cryptocap *cap) 296 { 297 298 refcount_acquire(&cap->cc_refs); 299 return (cap); 300 } 301 302 static void 303 cap_rele(struct cryptocap *cap) 304 { 305 306 if (refcount_release(&cap->cc_refs) == 0) 307 return; 308 309 KASSERT(cap->cc_sessions == 0, 310 ("freeing crypto driver with active sessions")); 311 KASSERT(cap->cc_koperations == 0, 312 ("freeing crypto driver with active key operations")); 313 314 free(cap, M_CRYPTO_DATA); 315 } 316 317 static int 318 crypto_init(void) 319 { 320 struct crypto_ret_worker *ret_worker; 321 int error; 322 323 mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table", 324 MTX_DEF|MTX_QUIET); 325 326 TAILQ_INIT(&crp_q); 327 TAILQ_INIT(&crp_kq); 328 mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF); 329 330 cryptop_zone = uma_zcreate("cryptop", 331 sizeof(struct cryptop), NULL, NULL, NULL, NULL, 332 UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 333 cryptoses_zone = uma_zcreate("crypto_session", 334 sizeof(struct crypto_session), NULL, NULL, NULL, NULL, 335 UMA_ALIGN_PTR, UMA_ZONE_ZINIT); 336 337 crypto_drivers_size = CRYPTO_DRIVERS_INITIAL; 338 crypto_drivers = malloc(crypto_drivers_size * 339 sizeof(struct cryptocap), M_CRYPTO_DATA, M_WAITOK | M_ZERO); 340 341 if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus) 342 crypto_workers_num = mp_ncpus; 343 344 crypto_tq = taskqueue_create("crypto", M_WAITOK | M_ZERO, 345 taskqueue_thread_enqueue, &crypto_tq); 346 347 taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN, 348 "crypto"); 349 350 error = kproc_create((void (*)(void *)) crypto_proc, NULL, 351 &cryptoproc, 0, 0, "crypto"); 352 if (error) { 353 printf("crypto_init: cannot start crypto thread; error %d", 354 error); 355 goto bad; 356 } 357 358 crypto_ret_workers = mallocarray(crypto_workers_num, 359 sizeof(struct crypto_ret_worker), M_CRYPTO_DATA, M_WAITOK | M_ZERO); 360 361 FOREACH_CRYPTO_RETW(ret_worker) { 362 TAILQ_INIT(&ret_worker->crp_ordered_ret_q); 363 TAILQ_INIT(&ret_worker->crp_ret_q); 364 TAILQ_INIT(&ret_worker->crp_ret_kq); 365 366 ret_worker->reorder_ops = 0; 367 ret_worker->reorder_cur_seq = 0; 368 369 mtx_init(&ret_worker->crypto_ret_mtx, "crypto", "crypto return queues", MTX_DEF); 370 371 error = kproc_create((void (*)(void *)) crypto_ret_proc, ret_worker, 372 &ret_worker->cryptoretproc, 0, 0, "crypto returns %td", CRYPTO_RETW_ID(ret_worker)); 373 if (error) { 374 printf("crypto_init: cannot start cryptoret thread; error %d", 375 error); 376 goto bad; 377 } 378 } 379 380 keybuf_init(); 381 382 return 0; 383 bad: 384 crypto_destroy(); 385 return error; 386 } 387 388 /* 389 * Signal a crypto thread to terminate. We use the driver 390 * table lock to synchronize the sleep/wakeups so that we 391 * are sure the threads have terminated before we release 392 * the data structures they use. See crypto_finis below 393 * for the other half of this song-and-dance. 394 */ 395 static void 396 crypto_terminate(struct proc **pp, void *q) 397 { 398 struct proc *p; 399 400 mtx_assert(&crypto_drivers_mtx, MA_OWNED); 401 p = *pp; 402 *pp = NULL; 403 if (p) { 404 wakeup_one(q); 405 PROC_LOCK(p); /* NB: insure we don't miss wakeup */ 406 CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */ 407 msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0); 408 PROC_UNLOCK(p); 409 CRYPTO_DRIVER_LOCK(); 410 } 411 } 412 413 static void 414 hmac_init_pad(struct auth_hash *axf, const char *key, int klen, void *auth_ctx, 415 uint8_t padval) 416 { 417 uint8_t hmac_key[HMAC_MAX_BLOCK_LEN]; 418 u_int i; 419 420 KASSERT(axf->blocksize <= sizeof(hmac_key), 421 ("Invalid HMAC block size %d", axf->blocksize)); 422 423 /* 424 * If the key is larger than the block size, use the digest of 425 * the key as the key instead. 426 */ 427 memset(hmac_key, 0, sizeof(hmac_key)); 428 if (klen > axf->blocksize) { 429 axf->Init(auth_ctx); 430 axf->Update(auth_ctx, key, klen); 431 axf->Final(hmac_key, auth_ctx); 432 klen = axf->hashsize; 433 } else 434 memcpy(hmac_key, key, klen); 435 436 for (i = 0; i < axf->blocksize; i++) 437 hmac_key[i] ^= padval; 438 439 axf->Init(auth_ctx); 440 axf->Update(auth_ctx, hmac_key, axf->blocksize); 441 explicit_bzero(hmac_key, sizeof(hmac_key)); 442 } 443 444 void 445 hmac_init_ipad(struct auth_hash *axf, const char *key, int klen, 446 void *auth_ctx) 447 { 448 449 hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL); 450 } 451 452 void 453 hmac_init_opad(struct auth_hash *axf, const char *key, int klen, 454 void *auth_ctx) 455 { 456 457 hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL); 458 } 459 460 static void 461 crypto_destroy(void) 462 { 463 struct crypto_ret_worker *ret_worker; 464 int i; 465 466 /* 467 * Terminate any crypto threads. 468 */ 469 if (crypto_tq != NULL) 470 taskqueue_drain_all(crypto_tq); 471 CRYPTO_DRIVER_LOCK(); 472 crypto_terminate(&cryptoproc, &crp_q); 473 FOREACH_CRYPTO_RETW(ret_worker) 474 crypto_terminate(&ret_worker->cryptoretproc, &ret_worker->crp_ret_q); 475 CRYPTO_DRIVER_UNLOCK(); 476 477 /* XXX flush queues??? */ 478 479 /* 480 * Reclaim dynamically allocated resources. 481 */ 482 for (i = 0; i < crypto_drivers_size; i++) { 483 if (crypto_drivers[i] != NULL) 484 cap_rele(crypto_drivers[i]); 485 } 486 free(crypto_drivers, M_CRYPTO_DATA); 487 488 if (cryptoses_zone != NULL) 489 uma_zdestroy(cryptoses_zone); 490 if (cryptop_zone != NULL) 491 uma_zdestroy(cryptop_zone); 492 mtx_destroy(&crypto_q_mtx); 493 FOREACH_CRYPTO_RETW(ret_worker) 494 mtx_destroy(&ret_worker->crypto_ret_mtx); 495 free(crypto_ret_workers, M_CRYPTO_DATA); 496 if (crypto_tq != NULL) 497 taskqueue_free(crypto_tq); 498 mtx_destroy(&crypto_drivers_mtx); 499 } 500 501 uint32_t 502 crypto_ses2hid(crypto_session_t crypto_session) 503 { 504 return (crypto_session->cap->cc_hid); 505 } 506 507 uint32_t 508 crypto_ses2caps(crypto_session_t crypto_session) 509 { 510 return (crypto_session->cap->cc_flags & 0xff000000); 511 } 512 513 void * 514 crypto_get_driver_session(crypto_session_t crypto_session) 515 { 516 return (crypto_session->softc); 517 } 518 519 const struct crypto_session_params * 520 crypto_get_params(crypto_session_t crypto_session) 521 { 522 return (&crypto_session->csp); 523 } 524 525 struct auth_hash * 526 crypto_auth_hash(const struct crypto_session_params *csp) 527 { 528 529 switch (csp->csp_auth_alg) { 530 case CRYPTO_SHA1_HMAC: 531 return (&auth_hash_hmac_sha1); 532 case CRYPTO_SHA2_224_HMAC: 533 return (&auth_hash_hmac_sha2_224); 534 case CRYPTO_SHA2_256_HMAC: 535 return (&auth_hash_hmac_sha2_256); 536 case CRYPTO_SHA2_384_HMAC: 537 return (&auth_hash_hmac_sha2_384); 538 case CRYPTO_SHA2_512_HMAC: 539 return (&auth_hash_hmac_sha2_512); 540 case CRYPTO_NULL_HMAC: 541 return (&auth_hash_null); 542 case CRYPTO_RIPEMD160_HMAC: 543 return (&auth_hash_hmac_ripemd_160); 544 case CRYPTO_SHA1: 545 return (&auth_hash_sha1); 546 case CRYPTO_SHA2_224: 547 return (&auth_hash_sha2_224); 548 case CRYPTO_SHA2_256: 549 return (&auth_hash_sha2_256); 550 case CRYPTO_SHA2_384: 551 return (&auth_hash_sha2_384); 552 case CRYPTO_SHA2_512: 553 return (&auth_hash_sha2_512); 554 case CRYPTO_AES_NIST_GMAC: 555 switch (csp->csp_auth_klen) { 556 case 128 / 8: 557 return (&auth_hash_nist_gmac_aes_128); 558 case 192 / 8: 559 return (&auth_hash_nist_gmac_aes_192); 560 case 256 / 8: 561 return (&auth_hash_nist_gmac_aes_256); 562 default: 563 return (NULL); 564 } 565 case CRYPTO_BLAKE2B: 566 return (&auth_hash_blake2b); 567 case CRYPTO_BLAKE2S: 568 return (&auth_hash_blake2s); 569 case CRYPTO_POLY1305: 570 return (&auth_hash_poly1305); 571 case CRYPTO_AES_CCM_CBC_MAC: 572 switch (csp->csp_auth_klen) { 573 case 128 / 8: 574 return (&auth_hash_ccm_cbc_mac_128); 575 case 192 / 8: 576 return (&auth_hash_ccm_cbc_mac_192); 577 case 256 / 8: 578 return (&auth_hash_ccm_cbc_mac_256); 579 default: 580 return (NULL); 581 } 582 default: 583 return (NULL); 584 } 585 } 586 587 struct enc_xform * 588 crypto_cipher(const struct crypto_session_params *csp) 589 { 590 591 switch (csp->csp_cipher_alg) { 592 case CRYPTO_RIJNDAEL128_CBC: 593 return (&enc_xform_rijndael128); 594 case CRYPTO_AES_XTS: 595 return (&enc_xform_aes_xts); 596 case CRYPTO_AES_ICM: 597 return (&enc_xform_aes_icm); 598 case CRYPTO_AES_NIST_GCM_16: 599 return (&enc_xform_aes_nist_gcm); 600 case CRYPTO_CAMELLIA_CBC: 601 return (&enc_xform_camellia); 602 case CRYPTO_NULL_CBC: 603 return (&enc_xform_null); 604 case CRYPTO_CHACHA20: 605 return (&enc_xform_chacha20); 606 case CRYPTO_AES_CCM_16: 607 return (&enc_xform_ccm); 608 default: 609 return (NULL); 610 } 611 } 612 613 static struct cryptocap * 614 crypto_checkdriver(u_int32_t hid) 615 { 616 617 return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]); 618 } 619 620 /* 621 * Select a driver for a new session that supports the specified 622 * algorithms and, optionally, is constrained according to the flags. 623 */ 624 static struct cryptocap * 625 crypto_select_driver(const struct crypto_session_params *csp, int flags) 626 { 627 struct cryptocap *cap, *best; 628 int best_match, error, hid; 629 630 CRYPTO_DRIVER_ASSERT(); 631 632 best = NULL; 633 for (hid = 0; hid < crypto_drivers_size; hid++) { 634 /* 635 * If there is no driver for this slot, or the driver 636 * is not appropriate (hardware or software based on 637 * match), then skip. 638 */ 639 cap = crypto_drivers[hid]; 640 if (cap == NULL || 641 (cap->cc_flags & flags) == 0) 642 continue; 643 644 error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp); 645 if (error >= 0) 646 continue; 647 648 /* 649 * Use the driver with the highest probe value. 650 * Hardware drivers use a higher probe value than 651 * software. In case of a tie, prefer the driver with 652 * the fewest active sessions. 653 */ 654 if (best == NULL || error > best_match || 655 (error == best_match && 656 cap->cc_sessions < best->cc_sessions)) { 657 best = cap; 658 best_match = error; 659 } 660 } 661 return best; 662 } 663 664 static enum alg_type { 665 ALG_NONE = 0, 666 ALG_CIPHER, 667 ALG_DIGEST, 668 ALG_KEYED_DIGEST, 669 ALG_COMPRESSION, 670 ALG_AEAD 671 } alg_types[] = { 672 [CRYPTO_SHA1_HMAC] = ALG_KEYED_DIGEST, 673 [CRYPTO_RIPEMD160_HMAC] = ALG_KEYED_DIGEST, 674 [CRYPTO_AES_CBC] = ALG_CIPHER, 675 [CRYPTO_SHA1] = ALG_DIGEST, 676 [CRYPTO_NULL_HMAC] = ALG_DIGEST, 677 [CRYPTO_NULL_CBC] = ALG_CIPHER, 678 [CRYPTO_DEFLATE_COMP] = ALG_COMPRESSION, 679 [CRYPTO_SHA2_256_HMAC] = ALG_KEYED_DIGEST, 680 [CRYPTO_SHA2_384_HMAC] = ALG_KEYED_DIGEST, 681 [CRYPTO_SHA2_512_HMAC] = ALG_KEYED_DIGEST, 682 [CRYPTO_CAMELLIA_CBC] = ALG_CIPHER, 683 [CRYPTO_AES_XTS] = ALG_CIPHER, 684 [CRYPTO_AES_ICM] = ALG_CIPHER, 685 [CRYPTO_AES_NIST_GMAC] = ALG_KEYED_DIGEST, 686 [CRYPTO_AES_NIST_GCM_16] = ALG_AEAD, 687 [CRYPTO_BLAKE2B] = ALG_KEYED_DIGEST, 688 [CRYPTO_BLAKE2S] = ALG_KEYED_DIGEST, 689 [CRYPTO_CHACHA20] = ALG_CIPHER, 690 [CRYPTO_SHA2_224_HMAC] = ALG_KEYED_DIGEST, 691 [CRYPTO_RIPEMD160] = ALG_DIGEST, 692 [CRYPTO_SHA2_224] = ALG_DIGEST, 693 [CRYPTO_SHA2_256] = ALG_DIGEST, 694 [CRYPTO_SHA2_384] = ALG_DIGEST, 695 [CRYPTO_SHA2_512] = ALG_DIGEST, 696 [CRYPTO_POLY1305] = ALG_KEYED_DIGEST, 697 [CRYPTO_AES_CCM_CBC_MAC] = ALG_KEYED_DIGEST, 698 [CRYPTO_AES_CCM_16] = ALG_AEAD, 699 }; 700 701 static enum alg_type 702 alg_type(int alg) 703 { 704 705 if (alg < nitems(alg_types)) 706 return (alg_types[alg]); 707 return (ALG_NONE); 708 } 709 710 static bool 711 alg_is_compression(int alg) 712 { 713 714 return (alg_type(alg) == ALG_COMPRESSION); 715 } 716 717 static bool 718 alg_is_cipher(int alg) 719 { 720 721 return (alg_type(alg) == ALG_CIPHER); 722 } 723 724 static bool 725 alg_is_digest(int alg) 726 { 727 728 return (alg_type(alg) == ALG_DIGEST || 729 alg_type(alg) == ALG_KEYED_DIGEST); 730 } 731 732 static bool 733 alg_is_keyed_digest(int alg) 734 { 735 736 return (alg_type(alg) == ALG_KEYED_DIGEST); 737 } 738 739 static bool 740 alg_is_aead(int alg) 741 { 742 743 return (alg_type(alg) == ALG_AEAD); 744 } 745 746 /* Various sanity checks on crypto session parameters. */ 747 static bool 748 check_csp(const struct crypto_session_params *csp) 749 { 750 struct auth_hash *axf; 751 752 /* Mode-independent checks. */ 753 if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) != 754 0) 755 return (false); 756 if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 || 757 csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0) 758 return (false); 759 if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0) 760 return (false); 761 if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0) 762 return (false); 763 764 switch (csp->csp_mode) { 765 case CSP_MODE_COMPRESS: 766 if (!alg_is_compression(csp->csp_cipher_alg)) 767 return (false); 768 if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) 769 return (false); 770 if (csp->csp_flags & CSP_F_SEPARATE_AAD) 771 return (false); 772 if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 || 773 csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || 774 csp->csp_auth_mlen != 0) 775 return (false); 776 break; 777 case CSP_MODE_CIPHER: 778 if (!alg_is_cipher(csp->csp_cipher_alg)) 779 return (false); 780 if (csp->csp_flags & CSP_F_SEPARATE_AAD) 781 return (false); 782 if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) { 783 if (csp->csp_cipher_klen == 0) 784 return (false); 785 if (csp->csp_ivlen == 0) 786 return (false); 787 } 788 if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) 789 return (false); 790 if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 || 791 csp->csp_auth_mlen != 0) 792 return (false); 793 break; 794 case CSP_MODE_DIGEST: 795 if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0) 796 return (false); 797 798 if (csp->csp_flags & CSP_F_SEPARATE_AAD) 799 return (false); 800 801 /* IV is optional for digests (e.g. GMAC). */ 802 if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) 803 return (false); 804 if (!alg_is_digest(csp->csp_auth_alg)) 805 return (false); 806 807 /* Key is optional for BLAKE2 digests. */ 808 if (csp->csp_auth_alg == CRYPTO_BLAKE2B || 809 csp->csp_auth_alg == CRYPTO_BLAKE2S) 810 ; 811 else if (alg_is_keyed_digest(csp->csp_auth_alg)) { 812 if (csp->csp_auth_klen == 0) 813 return (false); 814 } else { 815 if (csp->csp_auth_klen != 0) 816 return (false); 817 } 818 if (csp->csp_auth_mlen != 0) { 819 axf = crypto_auth_hash(csp); 820 if (axf == NULL || csp->csp_auth_mlen > axf->hashsize) 821 return (false); 822 } 823 break; 824 case CSP_MODE_AEAD: 825 if (!alg_is_aead(csp->csp_cipher_alg)) 826 return (false); 827 if (csp->csp_cipher_klen == 0) 828 return (false); 829 if (csp->csp_ivlen == 0 || 830 csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) 831 return (false); 832 if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0) 833 return (false); 834 835 /* 836 * XXX: Would be nice to have a better way to get this 837 * value. 838 */ 839 switch (csp->csp_cipher_alg) { 840 case CRYPTO_AES_NIST_GCM_16: 841 case CRYPTO_AES_CCM_16: 842 if (csp->csp_auth_mlen > 16) 843 return (false); 844 break; 845 } 846 break; 847 case CSP_MODE_ETA: 848 if (!alg_is_cipher(csp->csp_cipher_alg)) 849 return (false); 850 if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) { 851 if (csp->csp_cipher_klen == 0) 852 return (false); 853 if (csp->csp_ivlen == 0) 854 return (false); 855 } 856 if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN) 857 return (false); 858 if (!alg_is_digest(csp->csp_auth_alg)) 859 return (false); 860 861 /* Key is optional for BLAKE2 digests. */ 862 if (csp->csp_auth_alg == CRYPTO_BLAKE2B || 863 csp->csp_auth_alg == CRYPTO_BLAKE2S) 864 ; 865 else if (alg_is_keyed_digest(csp->csp_auth_alg)) { 866 if (csp->csp_auth_klen == 0) 867 return (false); 868 } else { 869 if (csp->csp_auth_klen != 0) 870 return (false); 871 } 872 if (csp->csp_auth_mlen != 0) { 873 axf = crypto_auth_hash(csp); 874 if (axf == NULL || csp->csp_auth_mlen > axf->hashsize) 875 return (false); 876 } 877 break; 878 default: 879 return (false); 880 } 881 882 return (true); 883 } 884 885 /* 886 * Delete a session after it has been detached from its driver. 887 */ 888 static void 889 crypto_deletesession(crypto_session_t cses) 890 { 891 struct cryptocap *cap; 892 893 cap = cses->cap; 894 895 zfree(cses->softc, M_CRYPTO_DATA); 896 uma_zfree(cryptoses_zone, cses); 897 898 CRYPTO_DRIVER_LOCK(); 899 cap->cc_sessions--; 900 if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP) 901 wakeup(cap); 902 CRYPTO_DRIVER_UNLOCK(); 903 cap_rele(cap); 904 } 905 906 /* 907 * Create a new session. The crid argument specifies a crypto 908 * driver to use or constraints on a driver to select (hardware 909 * only, software only, either). Whatever driver is selected 910 * must be capable of the requested crypto algorithms. 911 */ 912 int 913 crypto_newsession(crypto_session_t *cses, 914 const struct crypto_session_params *csp, int crid) 915 { 916 crypto_session_t res; 917 struct cryptocap *cap; 918 int err; 919 920 if (!check_csp(csp)) 921 return (EINVAL); 922 923 res = NULL; 924 925 CRYPTO_DRIVER_LOCK(); 926 if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { 927 /* 928 * Use specified driver; verify it is capable. 929 */ 930 cap = crypto_checkdriver(crid); 931 if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0) 932 cap = NULL; 933 } else { 934 /* 935 * No requested driver; select based on crid flags. 936 */ 937 cap = crypto_select_driver(csp, crid); 938 } 939 if (cap == NULL) { 940 CRYPTO_DRIVER_UNLOCK(); 941 CRYPTDEB("no driver"); 942 return (EOPNOTSUPP); 943 } 944 cap_ref(cap); 945 cap->cc_sessions++; 946 CRYPTO_DRIVER_UNLOCK(); 947 948 res = uma_zalloc(cryptoses_zone, M_WAITOK | M_ZERO); 949 res->cap = cap; 950 res->softc = malloc(cap->cc_session_size, M_CRYPTO_DATA, M_WAITOK | 951 M_ZERO); 952 res->csp = *csp; 953 954 /* Call the driver initialization routine. */ 955 err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp); 956 if (err != 0) { 957 CRYPTDEB("dev newsession failed: %d", err); 958 crypto_deletesession(res); 959 return (err); 960 } 961 962 *cses = res; 963 return (0); 964 } 965 966 /* 967 * Delete an existing session (or a reserved session on an unregistered 968 * driver). 969 */ 970 void 971 crypto_freesession(crypto_session_t cses) 972 { 973 struct cryptocap *cap; 974 975 if (cses == NULL) 976 return; 977 978 cap = cses->cap; 979 980 /* Call the driver cleanup routine, if available. */ 981 CRYPTODEV_FREESESSION(cap->cc_dev, cses); 982 983 crypto_deletesession(cses); 984 } 985 986 /* 987 * Return a new driver id. Registers a driver with the system so that 988 * it can be probed by subsequent sessions. 989 */ 990 int32_t 991 crypto_get_driverid(device_t dev, size_t sessionsize, int flags) 992 { 993 struct cryptocap *cap, **newdrv; 994 int i; 995 996 if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { 997 device_printf(dev, 998 "no flags specified when registering driver\n"); 999 return -1; 1000 } 1001 1002 cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO); 1003 cap->cc_dev = dev; 1004 cap->cc_session_size = sessionsize; 1005 cap->cc_flags = flags; 1006 refcount_init(&cap->cc_refs, 1); 1007 1008 CRYPTO_DRIVER_LOCK(); 1009 for (;;) { 1010 for (i = 0; i < crypto_drivers_size; i++) { 1011 if (crypto_drivers[i] == NULL) 1012 break; 1013 } 1014 1015 if (i < crypto_drivers_size) 1016 break; 1017 1018 /* Out of entries, allocate some more. */ 1019 1020 if (2 * crypto_drivers_size <= crypto_drivers_size) { 1021 CRYPTO_DRIVER_UNLOCK(); 1022 printf("crypto: driver count wraparound!\n"); 1023 cap_rele(cap); 1024 return (-1); 1025 } 1026 CRYPTO_DRIVER_UNLOCK(); 1027 1028 newdrv = malloc(2 * crypto_drivers_size * 1029 sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO); 1030 1031 CRYPTO_DRIVER_LOCK(); 1032 memcpy(newdrv, crypto_drivers, 1033 crypto_drivers_size * sizeof(*crypto_drivers)); 1034 1035 crypto_drivers_size *= 2; 1036 1037 free(crypto_drivers, M_CRYPTO_DATA); 1038 crypto_drivers = newdrv; 1039 } 1040 1041 cap->cc_hid = i; 1042 crypto_drivers[i] = cap; 1043 CRYPTO_DRIVER_UNLOCK(); 1044 1045 if (bootverbose) 1046 printf("crypto: assign %s driver id %u, flags 0x%x\n", 1047 device_get_nameunit(dev), i, flags); 1048 1049 return i; 1050 } 1051 1052 /* 1053 * Lookup a driver by name. We match against the full device 1054 * name and unit, and against just the name. The latter gives 1055 * us a simple widlcarding by device name. On success return the 1056 * driver/hardware identifier; otherwise return -1. 1057 */ 1058 int 1059 crypto_find_driver(const char *match) 1060 { 1061 struct cryptocap *cap; 1062 int i, len = strlen(match); 1063 1064 CRYPTO_DRIVER_LOCK(); 1065 for (i = 0; i < crypto_drivers_size; i++) { 1066 if (crypto_drivers[i] == NULL) 1067 continue; 1068 cap = crypto_drivers[i]; 1069 if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 || 1070 strncmp(match, device_get_name(cap->cc_dev), len) == 0) { 1071 CRYPTO_DRIVER_UNLOCK(); 1072 return (i); 1073 } 1074 } 1075 CRYPTO_DRIVER_UNLOCK(); 1076 return (-1); 1077 } 1078 1079 /* 1080 * Return the device_t for the specified driver or NULL 1081 * if the driver identifier is invalid. 1082 */ 1083 device_t 1084 crypto_find_device_byhid(int hid) 1085 { 1086 struct cryptocap *cap; 1087 device_t dev; 1088 1089 dev = NULL; 1090 CRYPTO_DRIVER_LOCK(); 1091 cap = crypto_checkdriver(hid); 1092 if (cap != NULL) 1093 dev = cap->cc_dev; 1094 CRYPTO_DRIVER_UNLOCK(); 1095 return (dev); 1096 } 1097 1098 /* 1099 * Return the device/driver capabilities. 1100 */ 1101 int 1102 crypto_getcaps(int hid) 1103 { 1104 struct cryptocap *cap; 1105 int flags; 1106 1107 flags = 0; 1108 CRYPTO_DRIVER_LOCK(); 1109 cap = crypto_checkdriver(hid); 1110 if (cap != NULL) 1111 flags = cap->cc_flags; 1112 CRYPTO_DRIVER_UNLOCK(); 1113 return (flags); 1114 } 1115 1116 /* 1117 * Register support for a key-related algorithm. This routine 1118 * is called once for each algorithm supported a driver. 1119 */ 1120 int 1121 crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags) 1122 { 1123 struct cryptocap *cap; 1124 int err; 1125 1126 CRYPTO_DRIVER_LOCK(); 1127 1128 cap = crypto_checkdriver(driverid); 1129 if (cap != NULL && 1130 (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) { 1131 /* 1132 * XXX Do some performance testing to determine placing. 1133 * XXX We probably need an auxiliary data structure that 1134 * XXX describes relative performances. 1135 */ 1136 1137 cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED; 1138 if (bootverbose) 1139 printf("crypto: %s registers key alg %u flags %u\n" 1140 , device_get_nameunit(cap->cc_dev) 1141 , kalg 1142 , flags 1143 ); 1144 err = 0; 1145 } else 1146 err = EINVAL; 1147 1148 CRYPTO_DRIVER_UNLOCK(); 1149 return err; 1150 } 1151 1152 /* 1153 * Unregister all algorithms associated with a crypto driver. 1154 * If there are pending sessions using it, leave enough information 1155 * around so that subsequent calls using those sessions will 1156 * correctly detect the driver has been unregistered and reroute 1157 * requests. 1158 */ 1159 int 1160 crypto_unregister_all(u_int32_t driverid) 1161 { 1162 struct cryptocap *cap; 1163 1164 CRYPTO_DRIVER_LOCK(); 1165 cap = crypto_checkdriver(driverid); 1166 if (cap == NULL) { 1167 CRYPTO_DRIVER_UNLOCK(); 1168 return (EINVAL); 1169 } 1170 1171 cap->cc_flags |= CRYPTOCAP_F_CLEANUP; 1172 crypto_drivers[driverid] = NULL; 1173 1174 /* 1175 * XXX: This doesn't do anything to kick sessions that 1176 * have no pending operations. 1177 */ 1178 while (cap->cc_sessions != 0 || cap->cc_koperations != 0) 1179 mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0); 1180 CRYPTO_DRIVER_UNLOCK(); 1181 cap_rele(cap); 1182 1183 return (0); 1184 } 1185 1186 /* 1187 * Clear blockage on a driver. The what parameter indicates whether 1188 * the driver is now ready for cryptop's and/or cryptokop's. 1189 */ 1190 int 1191 crypto_unblock(u_int32_t driverid, int what) 1192 { 1193 struct cryptocap *cap; 1194 int err; 1195 1196 CRYPTO_Q_LOCK(); 1197 cap = crypto_checkdriver(driverid); 1198 if (cap != NULL) { 1199 if (what & CRYPTO_SYMQ) 1200 cap->cc_qblocked = 0; 1201 if (what & CRYPTO_ASYMQ) 1202 cap->cc_kqblocked = 0; 1203 if (crp_sleep) 1204 wakeup_one(&crp_q); 1205 err = 0; 1206 } else 1207 err = EINVAL; 1208 CRYPTO_Q_UNLOCK(); 1209 1210 return err; 1211 } 1212 1213 size_t 1214 crypto_buffer_len(struct crypto_buffer *cb) 1215 { 1216 switch (cb->cb_type) { 1217 case CRYPTO_BUF_CONTIG: 1218 return (cb->cb_buf_len); 1219 case CRYPTO_BUF_MBUF: 1220 if (cb->cb_mbuf->m_flags & M_PKTHDR) 1221 return (cb->cb_mbuf->m_pkthdr.len); 1222 return (m_length(cb->cb_mbuf, NULL)); 1223 case CRYPTO_BUF_VMPAGE: 1224 return (cb->cb_vm_page_len); 1225 case CRYPTO_BUF_UIO: 1226 return (cb->cb_uio->uio_resid); 1227 default: 1228 return (0); 1229 } 1230 } 1231 1232 #ifdef INVARIANTS 1233 /* Various sanity checks on crypto requests. */ 1234 static void 1235 cb_sanity(struct crypto_buffer *cb, const char *name) 1236 { 1237 KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST, 1238 ("incoming crp with invalid %s buffer type", name)); 1239 switch (cb->cb_type) { 1240 case CRYPTO_BUF_CONTIG: 1241 KASSERT(cb->cb_buf_len >= 0, 1242 ("incoming crp with -ve %s buffer length", name)); 1243 break; 1244 case CRYPTO_BUF_VMPAGE: 1245 KASSERT(CRYPTO_HAS_VMPAGE, 1246 ("incoming crp uses dmap on supported arch")); 1247 KASSERT(cb->cb_vm_page_len >= 0, 1248 ("incoming crp with -ve %s buffer length", name)); 1249 KASSERT(cb->cb_vm_page_offset >= 0, 1250 ("incoming crp with -ve %s buffer offset", name)); 1251 KASSERT(cb->cb_vm_page_offset < PAGE_SIZE, 1252 ("incoming crp with %s buffer offset greater than page size" 1253 , name)); 1254 break; 1255 default: 1256 break; 1257 } 1258 } 1259 1260 static void 1261 crp_sanity(struct cryptop *crp) 1262 { 1263 struct crypto_session_params *csp; 1264 struct crypto_buffer *out; 1265 size_t ilen, len, olen; 1266 1267 KASSERT(crp->crp_session != NULL, ("incoming crp without a session")); 1268 KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE && 1269 crp->crp_obuf.cb_type <= CRYPTO_BUF_LAST, 1270 ("incoming crp with invalid output buffer type")); 1271 KASSERT(crp->crp_etype == 0, ("incoming crp with error")); 1272 KASSERT(!(crp->crp_flags & CRYPTO_F_DONE), 1273 ("incoming crp already done")); 1274 1275 csp = &crp->crp_session->csp; 1276 cb_sanity(&crp->crp_buf, "input"); 1277 ilen = crypto_buffer_len(&crp->crp_buf); 1278 olen = ilen; 1279 out = NULL; 1280 if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) { 1281 if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) { 1282 cb_sanity(&crp->crp_obuf, "output"); 1283 out = &crp->crp_obuf; 1284 olen = crypto_buffer_len(out); 1285 } 1286 } else 1287 KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE, 1288 ("incoming crp with separate output buffer " 1289 "but no session support")); 1290 1291 switch (csp->csp_mode) { 1292 case CSP_MODE_COMPRESS: 1293 KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS || 1294 crp->crp_op == CRYPTO_OP_DECOMPRESS, 1295 ("invalid compression op %x", crp->crp_op)); 1296 break; 1297 case CSP_MODE_CIPHER: 1298 KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT || 1299 crp->crp_op == CRYPTO_OP_DECRYPT, 1300 ("invalid cipher op %x", crp->crp_op)); 1301 break; 1302 case CSP_MODE_DIGEST: 1303 KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST || 1304 crp->crp_op == CRYPTO_OP_VERIFY_DIGEST, 1305 ("invalid digest op %x", crp->crp_op)); 1306 break; 1307 case CSP_MODE_AEAD: 1308 KASSERT(crp->crp_op == 1309 (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || 1310 crp->crp_op == 1311 (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), 1312 ("invalid AEAD op %x", crp->crp_op)); 1313 if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16) 1314 KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE, 1315 ("GCM without a separate IV")); 1316 if (csp->csp_cipher_alg == CRYPTO_AES_CCM_16) 1317 KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE, 1318 ("CCM without a separate IV")); 1319 break; 1320 case CSP_MODE_ETA: 1321 KASSERT(crp->crp_op == 1322 (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) || 1323 crp->crp_op == 1324 (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST), 1325 ("invalid ETA op %x", crp->crp_op)); 1326 break; 1327 } 1328 if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { 1329 if (crp->crp_aad == NULL) { 1330 KASSERT(crp->crp_aad_start == 0 || 1331 crp->crp_aad_start < ilen, 1332 ("invalid AAD start")); 1333 KASSERT(crp->crp_aad_length != 0 || 1334 crp->crp_aad_start == 0, 1335 ("AAD with zero length and non-zero start")); 1336 KASSERT(crp->crp_aad_length == 0 || 1337 crp->crp_aad_start + crp->crp_aad_length <= ilen, 1338 ("AAD outside input length")); 1339 } else { 1340 KASSERT(csp->csp_flags & CSP_F_SEPARATE_AAD, 1341 ("session doesn't support separate AAD buffer")); 1342 KASSERT(crp->crp_aad_start == 0, 1343 ("separate AAD buffer with non-zero AAD start")); 1344 KASSERT(crp->crp_aad_length != 0, 1345 ("separate AAD buffer with zero length")); 1346 } 1347 } else { 1348 KASSERT(crp->crp_aad == NULL && crp->crp_aad_start == 0 && 1349 crp->crp_aad_length == 0, 1350 ("AAD region in request not supporting AAD")); 1351 } 1352 if (csp->csp_ivlen == 0) { 1353 KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0, 1354 ("IV_SEPARATE set when IV isn't used")); 1355 KASSERT(crp->crp_iv_start == 0, 1356 ("crp_iv_start set when IV isn't used")); 1357 } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) { 1358 KASSERT(crp->crp_iv_start == 0, 1359 ("IV_SEPARATE used with non-zero IV start")); 1360 } else { 1361 KASSERT(crp->crp_iv_start < ilen, 1362 ("invalid IV start")); 1363 KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen, 1364 ("IV outside buffer length")); 1365 } 1366 /* XXX: payload_start of 0 should always be < ilen? */ 1367 KASSERT(crp->crp_payload_start == 0 || 1368 crp->crp_payload_start < ilen, 1369 ("invalid payload start")); 1370 KASSERT(crp->crp_payload_start + crp->crp_payload_length <= 1371 ilen, ("payload outside input buffer")); 1372 if (out == NULL) { 1373 KASSERT(crp->crp_payload_output_start == 0, 1374 ("payload output start non-zero without output buffer")); 1375 } else { 1376 KASSERT(crp->crp_payload_output_start < olen, 1377 ("invalid payload output start")); 1378 KASSERT(crp->crp_payload_output_start + 1379 crp->crp_payload_length <= olen, 1380 ("payload outside output buffer")); 1381 } 1382 if (csp->csp_mode == CSP_MODE_DIGEST || 1383 csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) { 1384 if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) 1385 len = ilen; 1386 else 1387 len = olen; 1388 KASSERT(crp->crp_digest_start == 0 || 1389 crp->crp_digest_start < len, 1390 ("invalid digest start")); 1391 /* XXX: For the mlen == 0 case this check isn't perfect. */ 1392 KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len, 1393 ("digest outside buffer")); 1394 } else { 1395 KASSERT(crp->crp_digest_start == 0, 1396 ("non-zero digest start for request without a digest")); 1397 } 1398 if (csp->csp_cipher_klen != 0) 1399 KASSERT(csp->csp_cipher_key != NULL || 1400 crp->crp_cipher_key != NULL, 1401 ("cipher request without a key")); 1402 if (csp->csp_auth_klen != 0) 1403 KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL, 1404 ("auth request without a key")); 1405 KASSERT(crp->crp_callback != NULL, ("incoming crp without callback")); 1406 } 1407 #endif 1408 1409 /* 1410 * Add a crypto request to a queue, to be processed by the kernel thread. 1411 */ 1412 int 1413 crypto_dispatch(struct cryptop *crp) 1414 { 1415 struct cryptocap *cap; 1416 int result; 1417 1418 #ifdef INVARIANTS 1419 crp_sanity(crp); 1420 #endif 1421 1422 CRYPTOSTAT_INC(cs_ops); 1423 1424 crp->crp_retw_id = ((uintptr_t)crp->crp_session) % crypto_workers_num; 1425 1426 if (CRYPTOP_ASYNC(crp)) { 1427 if (crp->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) { 1428 struct crypto_ret_worker *ret_worker; 1429 1430 ret_worker = CRYPTO_RETW(crp->crp_retw_id); 1431 1432 CRYPTO_RETW_LOCK(ret_worker); 1433 crp->crp_seq = ret_worker->reorder_ops++; 1434 CRYPTO_RETW_UNLOCK(ret_worker); 1435 } 1436 1437 TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp); 1438 taskqueue_enqueue(crypto_tq, &crp->crp_task); 1439 return (0); 1440 } 1441 1442 if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) { 1443 /* 1444 * Caller marked the request to be processed 1445 * immediately; dispatch it directly to the 1446 * driver unless the driver is currently blocked. 1447 */ 1448 cap = crp->crp_session->cap; 1449 if (!cap->cc_qblocked) { 1450 result = crypto_invoke(cap, crp, 0); 1451 if (result != ERESTART) 1452 return (result); 1453 /* 1454 * The driver ran out of resources, put the request on 1455 * the queue. 1456 */ 1457 } 1458 } 1459 crypto_batch_enqueue(crp); 1460 return 0; 1461 } 1462 1463 void 1464 crypto_batch_enqueue(struct cryptop *crp) 1465 { 1466 1467 CRYPTO_Q_LOCK(); 1468 TAILQ_INSERT_TAIL(&crp_q, crp, crp_next); 1469 if (crp_sleep) 1470 wakeup_one(&crp_q); 1471 CRYPTO_Q_UNLOCK(); 1472 } 1473 1474 /* 1475 * Add an asymetric crypto request to a queue, 1476 * to be processed by the kernel thread. 1477 */ 1478 int 1479 crypto_kdispatch(struct cryptkop *krp) 1480 { 1481 int error; 1482 1483 CRYPTOSTAT_INC(cs_kops); 1484 1485 krp->krp_cap = NULL; 1486 error = crypto_kinvoke(krp); 1487 if (error == ERESTART) { 1488 CRYPTO_Q_LOCK(); 1489 TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next); 1490 if (crp_sleep) 1491 wakeup_one(&crp_q); 1492 CRYPTO_Q_UNLOCK(); 1493 error = 0; 1494 } 1495 return error; 1496 } 1497 1498 /* 1499 * Verify a driver is suitable for the specified operation. 1500 */ 1501 static __inline int 1502 kdriver_suitable(const struct cryptocap *cap, const struct cryptkop *krp) 1503 { 1504 return (cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED) != 0; 1505 } 1506 1507 /* 1508 * Select a driver for an asym operation. The driver must 1509 * support the necessary algorithm. The caller can constrain 1510 * which device is selected with the flags parameter. The 1511 * algorithm we use here is pretty stupid; just use the first 1512 * driver that supports the algorithms we need. If there are 1513 * multiple suitable drivers we choose the driver with the 1514 * fewest active operations. We prefer hardware-backed 1515 * drivers to software ones when either may be used. 1516 */ 1517 static struct cryptocap * 1518 crypto_select_kdriver(const struct cryptkop *krp, int flags) 1519 { 1520 struct cryptocap *cap, *best; 1521 int match, hid; 1522 1523 CRYPTO_DRIVER_ASSERT(); 1524 1525 /* 1526 * Look first for hardware crypto devices if permitted. 1527 */ 1528 if (flags & CRYPTOCAP_F_HARDWARE) 1529 match = CRYPTOCAP_F_HARDWARE; 1530 else 1531 match = CRYPTOCAP_F_SOFTWARE; 1532 best = NULL; 1533 again: 1534 for (hid = 0; hid < crypto_drivers_size; hid++) { 1535 /* 1536 * If there is no driver for this slot, or the driver 1537 * is not appropriate (hardware or software based on 1538 * match), then skip. 1539 */ 1540 cap = crypto_drivers[hid]; 1541 if (cap->cc_dev == NULL || 1542 (cap->cc_flags & match) == 0) 1543 continue; 1544 1545 /* verify all the algorithms are supported. */ 1546 if (kdriver_suitable(cap, krp)) { 1547 if (best == NULL || 1548 cap->cc_koperations < best->cc_koperations) 1549 best = cap; 1550 } 1551 } 1552 if (best != NULL) 1553 return best; 1554 if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) { 1555 /* sort of an Algol 68-style for loop */ 1556 match = CRYPTOCAP_F_SOFTWARE; 1557 goto again; 1558 } 1559 return best; 1560 } 1561 1562 /* 1563 * Choose a driver for an asymmetric crypto request. 1564 */ 1565 static struct cryptocap * 1566 crypto_lookup_kdriver(struct cryptkop *krp) 1567 { 1568 struct cryptocap *cap; 1569 uint32_t crid; 1570 1571 /* If this request is requeued, it might already have a driver. */ 1572 cap = krp->krp_cap; 1573 if (cap != NULL) 1574 return (cap); 1575 1576 /* Use krp_crid to choose a driver. */ 1577 crid = krp->krp_crid; 1578 if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { 1579 cap = crypto_checkdriver(crid); 1580 if (cap != NULL) { 1581 /* 1582 * Driver present, it must support the 1583 * necessary algorithm and, if s/w drivers are 1584 * excluded, it must be registered as 1585 * hardware-backed. 1586 */ 1587 if (!kdriver_suitable(cap, krp) || 1588 (!crypto_devallowsoft && 1589 (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0)) 1590 cap = NULL; 1591 } 1592 } else { 1593 /* 1594 * No requested driver; select based on crid flags. 1595 */ 1596 if (!crypto_devallowsoft) /* NB: disallow s/w drivers */ 1597 crid &= ~CRYPTOCAP_F_SOFTWARE; 1598 cap = crypto_select_kdriver(krp, crid); 1599 } 1600 1601 if (cap != NULL) { 1602 krp->krp_cap = cap_ref(cap); 1603 krp->krp_hid = cap->cc_hid; 1604 } 1605 return (cap); 1606 } 1607 1608 /* 1609 * Dispatch an asymmetric crypto request. 1610 */ 1611 static int 1612 crypto_kinvoke(struct cryptkop *krp) 1613 { 1614 struct cryptocap *cap = NULL; 1615 int error; 1616 1617 KASSERT(krp != NULL, ("%s: krp == NULL", __func__)); 1618 KASSERT(krp->krp_callback != NULL, 1619 ("%s: krp->crp_callback == NULL", __func__)); 1620 1621 CRYPTO_DRIVER_LOCK(); 1622 cap = crypto_lookup_kdriver(krp); 1623 if (cap == NULL) { 1624 CRYPTO_DRIVER_UNLOCK(); 1625 krp->krp_status = ENODEV; 1626 crypto_kdone(krp); 1627 return (0); 1628 } 1629 1630 /* 1631 * If the device is blocked, return ERESTART to requeue it. 1632 */ 1633 if (cap->cc_kqblocked) { 1634 /* 1635 * XXX: Previously this set krp_status to ERESTART and 1636 * invoked crypto_kdone but the caller would still 1637 * requeue it. 1638 */ 1639 CRYPTO_DRIVER_UNLOCK(); 1640 return (ERESTART); 1641 } 1642 1643 cap->cc_koperations++; 1644 CRYPTO_DRIVER_UNLOCK(); 1645 error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0); 1646 if (error == ERESTART) { 1647 CRYPTO_DRIVER_LOCK(); 1648 cap->cc_koperations--; 1649 CRYPTO_DRIVER_UNLOCK(); 1650 return (error); 1651 } 1652 1653 KASSERT(error == 0, ("error %d returned from crypto_kprocess", error)); 1654 return (0); 1655 } 1656 1657 static void 1658 crypto_task_invoke(void *ctx, int pending) 1659 { 1660 struct cryptocap *cap; 1661 struct cryptop *crp; 1662 int result; 1663 1664 crp = (struct cryptop *)ctx; 1665 cap = crp->crp_session->cap; 1666 result = crypto_invoke(cap, crp, 0); 1667 if (result == ERESTART) 1668 crypto_batch_enqueue(crp); 1669 } 1670 1671 /* 1672 * Dispatch a crypto request to the appropriate crypto devices. 1673 */ 1674 static int 1675 crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint) 1676 { 1677 1678 KASSERT(crp != NULL, ("%s: crp == NULL", __func__)); 1679 KASSERT(crp->crp_callback != NULL, 1680 ("%s: crp->crp_callback == NULL", __func__)); 1681 KASSERT(crp->crp_session != NULL, 1682 ("%s: crp->crp_session == NULL", __func__)); 1683 1684 if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { 1685 struct crypto_session_params csp; 1686 crypto_session_t nses; 1687 1688 /* 1689 * Driver has unregistered; migrate the session and return 1690 * an error to the caller so they'll resubmit the op. 1691 * 1692 * XXX: What if there are more already queued requests for this 1693 * session? 1694 * 1695 * XXX: Real solution is to make sessions refcounted 1696 * and force callers to hold a reference when 1697 * assigning to crp_session. Could maybe change 1698 * crypto_getreq to accept a session pointer to make 1699 * that work. Alternatively, we could abandon the 1700 * notion of rewriting crp_session in requests forcing 1701 * the caller to deal with allocating a new session. 1702 * Perhaps provide a method to allow a crp's session to 1703 * be swapped that callers could use. 1704 */ 1705 csp = crp->crp_session->csp; 1706 crypto_freesession(crp->crp_session); 1707 1708 /* 1709 * XXX: Key pointers may no longer be valid. If we 1710 * really want to support this we need to define the 1711 * KPI such that 'csp' is required to be valid for the 1712 * duration of a session by the caller perhaps. 1713 * 1714 * XXX: If the keys have been changed this will reuse 1715 * the old keys. This probably suggests making 1716 * rekeying more explicit and updating the key 1717 * pointers in 'csp' when the keys change. 1718 */ 1719 if (crypto_newsession(&nses, &csp, 1720 CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0) 1721 crp->crp_session = nses; 1722 1723 crp->crp_etype = EAGAIN; 1724 crypto_done(crp); 1725 return 0; 1726 } else { 1727 /* 1728 * Invoke the driver to process the request. 1729 */ 1730 return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint); 1731 } 1732 } 1733 1734 void 1735 crypto_destroyreq(struct cryptop *crp) 1736 { 1737 #ifdef DIAGNOSTIC 1738 { 1739 struct cryptop *crp2; 1740 struct crypto_ret_worker *ret_worker; 1741 1742 CRYPTO_Q_LOCK(); 1743 TAILQ_FOREACH(crp2, &crp_q, crp_next) { 1744 KASSERT(crp2 != crp, 1745 ("Freeing cryptop from the crypto queue (%p).", 1746 crp)); 1747 } 1748 CRYPTO_Q_UNLOCK(); 1749 1750 FOREACH_CRYPTO_RETW(ret_worker) { 1751 CRYPTO_RETW_LOCK(ret_worker); 1752 TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) { 1753 KASSERT(crp2 != crp, 1754 ("Freeing cryptop from the return queue (%p).", 1755 crp)); 1756 } 1757 CRYPTO_RETW_UNLOCK(ret_worker); 1758 } 1759 } 1760 #endif 1761 } 1762 1763 void 1764 crypto_freereq(struct cryptop *crp) 1765 { 1766 if (crp == NULL) 1767 return; 1768 1769 crypto_destroyreq(crp); 1770 uma_zfree(cryptop_zone, crp); 1771 } 1772 1773 static void 1774 _crypto_initreq(struct cryptop *crp, crypto_session_t cses) 1775 { 1776 crp->crp_session = cses; 1777 } 1778 1779 void 1780 crypto_initreq(struct cryptop *crp, crypto_session_t cses) 1781 { 1782 memset(crp, 0, sizeof(*crp)); 1783 _crypto_initreq(crp, cses); 1784 } 1785 1786 struct cryptop * 1787 crypto_getreq(crypto_session_t cses, int how) 1788 { 1789 struct cryptop *crp; 1790 1791 MPASS(how == M_WAITOK || how == M_NOWAIT); 1792 crp = uma_zalloc(cryptop_zone, how | M_ZERO); 1793 if (crp != NULL) 1794 _crypto_initreq(crp, cses); 1795 return (crp); 1796 } 1797 1798 /* 1799 * Invoke the callback on behalf of the driver. 1800 */ 1801 void 1802 crypto_done(struct cryptop *crp) 1803 { 1804 KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0, 1805 ("crypto_done: op already done, flags 0x%x", crp->crp_flags)); 1806 crp->crp_flags |= CRYPTO_F_DONE; 1807 if (crp->crp_etype != 0) 1808 CRYPTOSTAT_INC(cs_errs); 1809 1810 /* 1811 * CBIMM means unconditionally do the callback immediately; 1812 * CBIFSYNC means do the callback immediately only if the 1813 * operation was done synchronously. Both are used to avoid 1814 * doing extraneous context switches; the latter is mostly 1815 * used with the software crypto driver. 1816 */ 1817 if (!CRYPTOP_ASYNC_KEEPORDER(crp) && 1818 ((crp->crp_flags & CRYPTO_F_CBIMM) || 1819 ((crp->crp_flags & CRYPTO_F_CBIFSYNC) && 1820 (crypto_ses2caps(crp->crp_session) & CRYPTOCAP_F_SYNC)))) { 1821 /* 1822 * Do the callback directly. This is ok when the 1823 * callback routine does very little (e.g. the 1824 * /dev/crypto callback method just does a wakeup). 1825 */ 1826 crp->crp_callback(crp); 1827 } else { 1828 struct crypto_ret_worker *ret_worker; 1829 bool wake; 1830 1831 ret_worker = CRYPTO_RETW(crp->crp_retw_id); 1832 wake = false; 1833 1834 /* 1835 * Normal case; queue the callback for the thread. 1836 */ 1837 CRYPTO_RETW_LOCK(ret_worker); 1838 if (CRYPTOP_ASYNC_KEEPORDER(crp)) { 1839 struct cryptop *tmp; 1840 1841 TAILQ_FOREACH_REVERSE(tmp, &ret_worker->crp_ordered_ret_q, 1842 cryptop_q, crp_next) { 1843 if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) { 1844 TAILQ_INSERT_AFTER(&ret_worker->crp_ordered_ret_q, 1845 tmp, crp, crp_next); 1846 break; 1847 } 1848 } 1849 if (tmp == NULL) { 1850 TAILQ_INSERT_HEAD(&ret_worker->crp_ordered_ret_q, 1851 crp, crp_next); 1852 } 1853 1854 if (crp->crp_seq == ret_worker->reorder_cur_seq) 1855 wake = true; 1856 } 1857 else { 1858 if (CRYPTO_RETW_EMPTY(ret_worker)) 1859 wake = true; 1860 1861 TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, crp_next); 1862 } 1863 1864 if (wake) 1865 wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ 1866 CRYPTO_RETW_UNLOCK(ret_worker); 1867 } 1868 } 1869 1870 /* 1871 * Invoke the callback on behalf of the driver. 1872 */ 1873 void 1874 crypto_kdone(struct cryptkop *krp) 1875 { 1876 struct crypto_ret_worker *ret_worker; 1877 struct cryptocap *cap; 1878 1879 if (krp->krp_status != 0) 1880 CRYPTOSTAT_INC(cs_kerrs); 1881 CRYPTO_DRIVER_LOCK(); 1882 cap = krp->krp_cap; 1883 KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0")); 1884 cap->cc_koperations--; 1885 if (cap->cc_koperations == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP) 1886 wakeup(cap); 1887 CRYPTO_DRIVER_UNLOCK(); 1888 krp->krp_cap = NULL; 1889 cap_rele(cap); 1890 1891 ret_worker = CRYPTO_RETW(0); 1892 1893 CRYPTO_RETW_LOCK(ret_worker); 1894 if (CRYPTO_RETW_EMPTY(ret_worker)) 1895 wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ 1896 TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next); 1897 CRYPTO_RETW_UNLOCK(ret_worker); 1898 } 1899 1900 int 1901 crypto_getfeat(int *featp) 1902 { 1903 int hid, kalg, feat = 0; 1904 1905 CRYPTO_DRIVER_LOCK(); 1906 for (hid = 0; hid < crypto_drivers_size; hid++) { 1907 const struct cryptocap *cap = crypto_drivers[hid]; 1908 1909 if (cap == NULL || 1910 ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) && 1911 !crypto_devallowsoft)) { 1912 continue; 1913 } 1914 for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++) 1915 if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED) 1916 feat |= 1 << kalg; 1917 } 1918 CRYPTO_DRIVER_UNLOCK(); 1919 *featp = feat; 1920 return (0); 1921 } 1922 1923 /* 1924 * Terminate a thread at module unload. The process that 1925 * initiated this is waiting for us to signal that we're gone; 1926 * wake it up and exit. We use the driver table lock to insure 1927 * we don't do the wakeup before they're waiting. There is no 1928 * race here because the waiter sleeps on the proc lock for the 1929 * thread so it gets notified at the right time because of an 1930 * extra wakeup that's done in exit1(). 1931 */ 1932 static void 1933 crypto_finis(void *chan) 1934 { 1935 CRYPTO_DRIVER_LOCK(); 1936 wakeup_one(chan); 1937 CRYPTO_DRIVER_UNLOCK(); 1938 kproc_exit(0); 1939 } 1940 1941 /* 1942 * Crypto thread, dispatches crypto requests. 1943 */ 1944 static void 1945 crypto_proc(void) 1946 { 1947 struct cryptop *crp, *submit; 1948 struct cryptkop *krp; 1949 struct cryptocap *cap; 1950 int result, hint; 1951 1952 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 1953 fpu_kern_thread(FPU_KERN_NORMAL); 1954 #endif 1955 1956 CRYPTO_Q_LOCK(); 1957 for (;;) { 1958 /* 1959 * Find the first element in the queue that can be 1960 * processed and look-ahead to see if multiple ops 1961 * are ready for the same driver. 1962 */ 1963 submit = NULL; 1964 hint = 0; 1965 TAILQ_FOREACH(crp, &crp_q, crp_next) { 1966 cap = crp->crp_session->cap; 1967 /* 1968 * Driver cannot disappeared when there is an active 1969 * session. 1970 */ 1971 KASSERT(cap != NULL, ("%s:%u Driver disappeared.", 1972 __func__, __LINE__)); 1973 if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { 1974 /* Op needs to be migrated, process it. */ 1975 if (submit == NULL) 1976 submit = crp; 1977 break; 1978 } 1979 if (!cap->cc_qblocked) { 1980 if (submit != NULL) { 1981 /* 1982 * We stop on finding another op, 1983 * regardless whether its for the same 1984 * driver or not. We could keep 1985 * searching the queue but it might be 1986 * better to just use a per-driver 1987 * queue instead. 1988 */ 1989 if (submit->crp_session->cap == cap) 1990 hint = CRYPTO_HINT_MORE; 1991 break; 1992 } else { 1993 submit = crp; 1994 if ((submit->crp_flags & CRYPTO_F_BATCH) == 0) 1995 break; 1996 /* keep scanning for more are q'd */ 1997 } 1998 } 1999 } 2000 if (submit != NULL) { 2001 TAILQ_REMOVE(&crp_q, submit, crp_next); 2002 cap = submit->crp_session->cap; 2003 KASSERT(cap != NULL, ("%s:%u Driver disappeared.", 2004 __func__, __LINE__)); 2005 CRYPTO_Q_UNLOCK(); 2006 result = crypto_invoke(cap, submit, hint); 2007 CRYPTO_Q_LOCK(); 2008 if (result == ERESTART) { 2009 /* 2010 * The driver ran out of resources, mark the 2011 * driver ``blocked'' for cryptop's and put 2012 * the request back in the queue. It would 2013 * best to put the request back where we got 2014 * it but that's hard so for now we put it 2015 * at the front. This should be ok; putting 2016 * it at the end does not work. 2017 */ 2018 cap->cc_qblocked = 1; 2019 TAILQ_INSERT_HEAD(&crp_q, submit, crp_next); 2020 CRYPTOSTAT_INC(cs_blocks); 2021 } 2022 } 2023 2024 /* As above, but for key ops */ 2025 TAILQ_FOREACH(krp, &crp_kq, krp_next) { 2026 cap = krp->krp_cap; 2027 if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { 2028 /* 2029 * Operation needs to be migrated, 2030 * clear krp_cap so a new driver is 2031 * selected. 2032 */ 2033 krp->krp_cap = NULL; 2034 cap_rele(cap); 2035 break; 2036 } 2037 if (!cap->cc_kqblocked) 2038 break; 2039 } 2040 if (krp != NULL) { 2041 TAILQ_REMOVE(&crp_kq, krp, krp_next); 2042 CRYPTO_Q_UNLOCK(); 2043 result = crypto_kinvoke(krp); 2044 CRYPTO_Q_LOCK(); 2045 if (result == ERESTART) { 2046 /* 2047 * The driver ran out of resources, mark the 2048 * driver ``blocked'' for cryptkop's and put 2049 * the request back in the queue. It would 2050 * best to put the request back where we got 2051 * it but that's hard so for now we put it 2052 * at the front. This should be ok; putting 2053 * it at the end does not work. 2054 */ 2055 krp->krp_cap->cc_kqblocked = 1; 2056 TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next); 2057 CRYPTOSTAT_INC(cs_kblocks); 2058 } 2059 } 2060 2061 if (submit == NULL && krp == NULL) { 2062 /* 2063 * Nothing more to be processed. Sleep until we're 2064 * woken because there are more ops to process. 2065 * This happens either by submission or by a driver 2066 * becoming unblocked and notifying us through 2067 * crypto_unblock. Note that when we wakeup we 2068 * start processing each queue again from the 2069 * front. It's not clear that it's important to 2070 * preserve this ordering since ops may finish 2071 * out of order if dispatched to different devices 2072 * and some become blocked while others do not. 2073 */ 2074 crp_sleep = 1; 2075 msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0); 2076 crp_sleep = 0; 2077 if (cryptoproc == NULL) 2078 break; 2079 CRYPTOSTAT_INC(cs_intrs); 2080 } 2081 } 2082 CRYPTO_Q_UNLOCK(); 2083 2084 crypto_finis(&crp_q); 2085 } 2086 2087 /* 2088 * Crypto returns thread, does callbacks for processed crypto requests. 2089 * Callbacks are done here, rather than in the crypto drivers, because 2090 * callbacks typically are expensive and would slow interrupt handling. 2091 */ 2092 static void 2093 crypto_ret_proc(struct crypto_ret_worker *ret_worker) 2094 { 2095 struct cryptop *crpt; 2096 struct cryptkop *krpt; 2097 2098 CRYPTO_RETW_LOCK(ret_worker); 2099 for (;;) { 2100 /* Harvest return q's for completed ops */ 2101 crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q); 2102 if (crpt != NULL) { 2103 if (crpt->crp_seq == ret_worker->reorder_cur_seq) { 2104 TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q, crpt, crp_next); 2105 ret_worker->reorder_cur_seq++; 2106 } else { 2107 crpt = NULL; 2108 } 2109 } 2110 2111 if (crpt == NULL) { 2112 crpt = TAILQ_FIRST(&ret_worker->crp_ret_q); 2113 if (crpt != NULL) 2114 TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt, crp_next); 2115 } 2116 2117 krpt = TAILQ_FIRST(&ret_worker->crp_ret_kq); 2118 if (krpt != NULL) 2119 TAILQ_REMOVE(&ret_worker->crp_ret_kq, krpt, krp_next); 2120 2121 if (crpt != NULL || krpt != NULL) { 2122 CRYPTO_RETW_UNLOCK(ret_worker); 2123 /* 2124 * Run callbacks unlocked. 2125 */ 2126 if (crpt != NULL) 2127 crpt->crp_callback(crpt); 2128 if (krpt != NULL) 2129 krpt->krp_callback(krpt); 2130 CRYPTO_RETW_LOCK(ret_worker); 2131 } else { 2132 /* 2133 * Nothing more to be processed. Sleep until we're 2134 * woken because there are more returns to process. 2135 */ 2136 msleep(&ret_worker->crp_ret_q, &ret_worker->crypto_ret_mtx, PWAIT, 2137 "crypto_ret_wait", 0); 2138 if (ret_worker->cryptoretproc == NULL) 2139 break; 2140 CRYPTOSTAT_INC(cs_rets); 2141 } 2142 } 2143 CRYPTO_RETW_UNLOCK(ret_worker); 2144 2145 crypto_finis(&ret_worker->crp_ret_q); 2146 } 2147 2148 #ifdef DDB 2149 static void 2150 db_show_drivers(void) 2151 { 2152 int hid; 2153 2154 db_printf("%12s %4s %4s %8s %2s %2s\n" 2155 , "Device" 2156 , "Ses" 2157 , "Kops" 2158 , "Flags" 2159 , "QB" 2160 , "KB" 2161 ); 2162 for (hid = 0; hid < crypto_drivers_size; hid++) { 2163 const struct cryptocap *cap = crypto_drivers[hid]; 2164 if (cap == NULL) 2165 continue; 2166 db_printf("%-12s %4u %4u %08x %2u %2u\n" 2167 , device_get_nameunit(cap->cc_dev) 2168 , cap->cc_sessions 2169 , cap->cc_koperations 2170 , cap->cc_flags 2171 , cap->cc_qblocked 2172 , cap->cc_kqblocked 2173 ); 2174 } 2175 } 2176 2177 DB_SHOW_COMMAND(crypto, db_show_crypto) 2178 { 2179 struct cryptop *crp; 2180 struct crypto_ret_worker *ret_worker; 2181 2182 db_show_drivers(); 2183 db_printf("\n"); 2184 2185 db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n", 2186 "HID", "Caps", "Ilen", "Olen", "Etype", "Flags", 2187 "Device", "Callback"); 2188 TAILQ_FOREACH(crp, &crp_q, crp_next) { 2189 db_printf("%4u %08x %4u %4u %04x %8p %8p\n" 2190 , crp->crp_session->cap->cc_hid 2191 , (int) crypto_ses2caps(crp->crp_session) 2192 , crp->crp_olen 2193 , crp->crp_etype 2194 , crp->crp_flags 2195 , device_get_nameunit(crp->crp_session->cap->cc_dev) 2196 , crp->crp_callback 2197 ); 2198 } 2199 FOREACH_CRYPTO_RETW(ret_worker) { 2200 db_printf("\n%8s %4s %4s %4s %8s\n", 2201 "ret_worker", "HID", "Etype", "Flags", "Callback"); 2202 if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { 2203 TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) { 2204 db_printf("%8td %4u %4u %04x %8p\n" 2205 , CRYPTO_RETW_ID(ret_worker) 2206 , crp->crp_session->cap->cc_hid 2207 , crp->crp_etype 2208 , crp->crp_flags 2209 , crp->crp_callback 2210 ); 2211 } 2212 } 2213 } 2214 } 2215 2216 DB_SHOW_COMMAND(kcrypto, db_show_kcrypto) 2217 { 2218 struct cryptkop *krp; 2219 struct crypto_ret_worker *ret_worker; 2220 2221 db_show_drivers(); 2222 db_printf("\n"); 2223 2224 db_printf("%4s %5s %4s %4s %8s %4s %8s\n", 2225 "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback"); 2226 TAILQ_FOREACH(krp, &crp_kq, krp_next) { 2227 db_printf("%4u %5u %4u %4u %08x %4u %8p\n" 2228 , krp->krp_op 2229 , krp->krp_status 2230 , krp->krp_iparams, krp->krp_oparams 2231 , krp->krp_crid, krp->krp_hid 2232 , krp->krp_callback 2233 ); 2234 } 2235 2236 ret_worker = CRYPTO_RETW(0); 2237 if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) { 2238 db_printf("%4s %5s %8s %4s %8s\n", 2239 "Op", "Status", "CRID", "HID", "Callback"); 2240 TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) { 2241 db_printf("%4u %5u %08x %4u %8p\n" 2242 , krp->krp_op 2243 , krp->krp_status 2244 , krp->krp_crid, krp->krp_hid 2245 , krp->krp_callback 2246 ); 2247 } 2248 } 2249 } 2250 #endif 2251 2252 int crypto_modevent(module_t mod, int type, void *unused); 2253 2254 /* 2255 * Initialization code, both for static and dynamic loading. 2256 * Note this is not invoked with the usual MODULE_DECLARE 2257 * mechanism but instead is listed as a dependency by the 2258 * cryptosoft driver. This guarantees proper ordering of 2259 * calls on module load/unload. 2260 */ 2261 int 2262 crypto_modevent(module_t mod, int type, void *unused) 2263 { 2264 int error = EINVAL; 2265 2266 switch (type) { 2267 case MOD_LOAD: 2268 error = crypto_init(); 2269 if (error == 0 && bootverbose) 2270 printf("crypto: <crypto core>\n"); 2271 break; 2272 case MOD_UNLOAD: 2273 /*XXX disallow if active sessions */ 2274 error = 0; 2275 crypto_destroy(); 2276 return 0; 2277 } 2278 return error; 2279 } 2280 MODULE_VERSION(crypto, 1); 2281 MODULE_DEPEND(crypto, zlib, 1, 1, 1); 2282