/*-
 * Copyright (c) 2002-2006 Sam Leffler.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Cryptographic Subsystem.
 *
 * This code is derived from the OpenBSD Cryptographic Framework (OCF)
 * that has the copyright shown below.  Very little of the original
 * code remains.
 */

/*-
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000.  Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include "opt_compat.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>

#include <ddb/ddb.h>

#include <machine/vmparam.h>
#include <vm/uma.h>

#include <crypto/intake.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform_auth.h>
#include <opencrypto/xform_enc.h>

#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
#include <machine/pcb.h>
#endif

SDT_PROVIDER_DEFINE(opencrypto);

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each asym algorithm they support with crypto_kregister().
 */
static	struct mtx crypto_drivers_mtx;	/* lock on driver table */
#define	CRYPTO_DRIVER_LOCK()	mtx_lock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_UNLOCK()	mtx_unlock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_ASSERT()	mtx_assert(&crypto_drivers_mtx, MA_OWNED)

/*
 * Crypto device/driver capabilities structure.
 *
 * Synchronization:
 * (d) - protected by CRYPTO_DRIVER_LOCK()
 * (q) - protected by CRYPTO_Q_LOCK()
 * Untagged fields are read-only.
 */
struct cryptocap {
	device_t	cc_dev;
	uint32_t	cc_hid;
	uint32_t	cc_sessions;		/* (d) # of sessions */
	uint32_t	cc_koperations;		/* (d) # of asym operations */
	uint8_t		cc_kalg[CRK_ALGORITHM_MAX + 1];

	int		cc_flags;		/* (d) flags */
#define	CRYPTOCAP_F_CLEANUP	0x80000000	/* needs resource cleanup */
	int		cc_qblocked;		/* (q) symmetric q blocked */
	int		cc_kqblocked;		/* (q) asymmetric q blocked */
	size_t		cc_session_size;
	volatile int	cc_refs;
};

static	struct cryptocap **crypto_drivers = NULL;
static	int crypto_drivers_size = 0;

struct crypto_session {
	struct cryptocap *cap;
	struct crypto_session_params csp;
	uint64_t id;
	/* Driver softc follows. */
};
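
/*
 * Illustrative sketch (not part of the original file): the generic
 * session header and the driver's per-session state live in a single
 * allocation, so a driver recovers its state with
 * crypto_get_driver_session(), defined later in this file.  Assuming a
 * hypothetical driver type "struct foo_session":
 *
 *	static int
 *	foo_newsession(device_t dev, crypto_session_t cses,
 *	    const struct crypto_session_params *csp)
 *	{
 *		struct foo_session *s = crypto_get_driver_session(cses);
 *
 *		... initialize *s from csp ...
 *		return (0);
 *	}
 *
 * The driver declares the size of its state up front via the
 * sessionsize argument of crypto_get_driverid().
 */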

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * A single mutex is used to lock access to both queues.  We could
 * have one per-queue but having one simplifies handling of
 * block/unblock operations.
 */
static	int crp_sleep = 0;
static	TAILQ_HEAD(cryptop_q ,cryptop) crp_q;	/* request queues */
static	TAILQ_HEAD(,cryptkop) crp_kq;
static	struct mtx crypto_q_mtx;
#define	CRYPTO_Q_LOCK()		mtx_lock(&crypto_q_mtx)
#define	CRYPTO_Q_UNLOCK()	mtx_unlock(&crypto_q_mtx)

SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0,
    "In-kernel cryptography");

/*
 * Taskqueue used to dispatch the crypto requests
 * that have the CRYPTO_F_ASYNC flag.
 */
static struct taskqueue *crypto_tq;

/*
 * Crypto seq numbers are operated on with modular arithmetic.
 */
#define	CRYPTO_SEQ_GT(a,b)	((int)((a)-(b)) > 0)
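
/*
 * Worked example (illustrative, not part of the original file): the
 * signed difference makes the comparison robust across uint32_t
 * wraparound.  With a = 2 and b = 0xfffffffe, (a - b) is 4 modulo
 * 2^32, so (int)(a - b) == 4 > 0 and CRYPTO_SEQ_GT(a, b) holds even
 * though a < b numerically.  Sequence numbers that are "ahead" modulo
 * 2^32 therefore compare as greater, as long as the two values are
 * within 2^31 of each other.
 */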

struct crypto_ret_worker {
	struct mtx crypto_ret_mtx;

	TAILQ_HEAD(,cryptop) crp_ordered_ret_q;	/* ordered callback queue for symmetric jobs */
	TAILQ_HEAD(,cryptop) crp_ret_q;		/* callback queue for symmetric jobs */
	TAILQ_HEAD(,cryptkop) crp_ret_kq;	/* callback queue for asym jobs */

	uint32_t reorder_ops;		/* total ordered sym jobs received */
	uint32_t reorder_cur_seq;	/* current sym job dispatched */

	struct proc *cryptoretproc;
};
static struct crypto_ret_worker *crypto_ret_workers = NULL;

#define	CRYPTO_RETW(i)		(&crypto_ret_workers[i])
#define	CRYPTO_RETW_ID(w)	((w) - crypto_ret_workers)
#define	FOREACH_CRYPTO_RETW(w) \
	for (w = crypto_ret_workers; w < crypto_ret_workers + crypto_workers_num; ++w)

#define	CRYPTO_RETW_LOCK(w)	mtx_lock(&w->crypto_ret_mtx)
#define	CRYPTO_RETW_UNLOCK(w)	mtx_unlock(&w->crypto_ret_mtx)
#define	CRYPTO_RETW_EMPTY(w) \
	(TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq) && TAILQ_EMPTY(&w->crp_ordered_ret_q))

static int crypto_workers_num = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN,
    &crypto_workers_num, 0,
    "Number of crypto workers used to dispatch crypto jobs");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN,
    &crypto_workers_num, 0,
    "Number of crypto workers used to dispatch crypto jobs");
#endif

static	uma_zone_t cryptop_zone;

int	crypto_userasymcrypto = 1;
SYSCTL_INT(_kern_crypto, OID_AUTO, asym_enable, CTLFLAG_RW,
    &crypto_userasymcrypto, 0,
    "Enable user-mode access to asymmetric crypto support");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
    &crypto_userasymcrypto, 0,
    "Enable/disable user-mode access to asymmetric crypto support");
#endif

int	crypto_devallowsoft = 0;
SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RW,
    &crypto_devallowsoft, 0,
    "Enable use of software crypto by /dev/crypto");
#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
    &crypto_devallowsoft, 0,
    "Enable/disable use of software crypto by /dev/crypto");
#endif

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

static	void crypto_proc(void);
static	struct proc *cryptoproc;
static	void crypto_ret_proc(struct crypto_ret_worker *ret_worker);
static	void crypto_destroy(void);
static	int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
static	int crypto_kinvoke(struct cryptkop *krp);
static	void crypto_task_invoke(void *ctx, int pending);
static	void crypto_batch_enqueue(struct cryptop *crp);

static counter_u64_t cryptostats[sizeof(struct cryptostats) / sizeof(uint64_t)];
SYSCTL_COUNTER_U64_ARRAY(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW,
    cryptostats, nitems(cryptostats),
    "Crypto system statistics");

#define	CRYPTOSTAT_INC(stat) do {					\
	counter_u64_add(						\
	    cryptostats[offsetof(struct cryptostats, stat) / sizeof(uint64_t)],\
	    1);								\
} while (0)

static void
cryptostats_init(void *arg __unused)
{
	COUNTER_ARRAY_ALLOC(cryptostats, nitems(cryptostats), M_WAITOK);
}
SYSINIT(cryptostats_init, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_init, NULL);

static void
cryptostats_fini(void *arg __unused)
{
	COUNTER_ARRAY_FREE(cryptostats, nitems(cryptostats));
}
SYSUNINIT(cryptostats_fini, SI_SUB_COUNTER, SI_ORDER_ANY, cryptostats_fini,
    NULL);
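
/*
 * Illustrative sketch (not part of the original file): every uint64_t
 * field of struct cryptostats is backed by the counter(9) entry at the
 * same index, so CRYPTOSTAT_INC(cs_ops) expands to roughly:
 *
 *	counter_u64_add(cryptostats[
 *	    offsetof(struct cryptostats, cs_ops) / sizeof(uint64_t)], 1);
 *
 * i.e. the field's byte offset divided by the field size selects the
 * per-CPU counter, keeping the stats layout and the counter array in
 * lockstep without maintaining a separate enum of indices.
 */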

/* Try to avoid directly exposing the key buffer as a symbol */
static struct keybuf *keybuf;

static struct keybuf empty_keybuf = {
	.kb_nents = 0
};

/* Obtain the key buffer from boot metadata */
static void
keybuf_init(void)
{
	caddr_t kmdp;

	kmdp = preload_search_by_type("elf kernel");

	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");

	keybuf = (struct keybuf *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_KEYBUF);

	if (keybuf == NULL)
		keybuf = &empty_keybuf;
}

/* It'd be nice if we could store these in some kind of secure memory... */
struct keybuf *
get_keybuf(void)
{

	return (keybuf);
}

static struct cryptocap *
cap_ref(struct cryptocap *cap)
{

	refcount_acquire(&cap->cc_refs);
	return (cap);
}

static void
cap_rele(struct cryptocap *cap)
{

	if (refcount_release(&cap->cc_refs) == 0)
		return;

	KASSERT(cap->cc_sessions == 0,
	    ("freeing crypto driver with active sessions"));
	KASSERT(cap->cc_koperations == 0,
	    ("freeing crypto driver with active key operations"));

	free(cap, M_CRYPTO_DATA);
}

static int
crypto_init(void)
{
	struct crypto_ret_worker *ret_worker;
	int error;

	mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
	    MTX_DEF|MTX_QUIET);

	TAILQ_INIT(&crp_q);
	TAILQ_INIT(&crp_kq);
	mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);

	cryptop_zone = uma_zcreate("cryptop",
	    sizeof(struct cryptop), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);

	crypto_drivers_size = CRYPTO_DRIVERS_INITIAL;
	crypto_drivers = malloc(crypto_drivers_size *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

	if (crypto_workers_num < 1 || crypto_workers_num > mp_ncpus)
		crypto_workers_num = mp_ncpus;

	crypto_tq = taskqueue_create("crypto", M_WAITOK | M_ZERO,
	    taskqueue_thread_enqueue, &crypto_tq);

	taskqueue_start_threads(&crypto_tq, crypto_workers_num, PRI_MIN_KERN,
	    "crypto");

	error = kproc_create((void (*)(void *)) crypto_proc, NULL,
	    &cryptoproc, 0, 0, "crypto");
	if (error) {
		printf("crypto_init: cannot start crypto thread; error %d",
		    error);
		goto bad;
	}

	crypto_ret_workers = mallocarray(crypto_workers_num,
	    sizeof(struct crypto_ret_worker), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

	FOREACH_CRYPTO_RETW(ret_worker) {
		TAILQ_INIT(&ret_worker->crp_ordered_ret_q);
		TAILQ_INIT(&ret_worker->crp_ret_q);
		TAILQ_INIT(&ret_worker->crp_ret_kq);

		ret_worker->reorder_ops = 0;
		ret_worker->reorder_cur_seq = 0;

		mtx_init(&ret_worker->crypto_ret_mtx, "crypto",
		    "crypto return queues", MTX_DEF);

		error = kproc_create((void (*)(void *)) crypto_ret_proc,
		    ret_worker, &ret_worker->cryptoretproc, 0, 0,
		    "crypto returns %td", CRYPTO_RETW_ID(ret_worker));
		if (error) {
			printf("crypto_init: cannot start cryptoret thread; error %d",
			    error);
			goto bad;
		}
	}

	keybuf_init();

	return 0;
bad:
	crypto_destroy();
	return error;
}

/*
 * Signal a crypto thread to terminate.  We use the driver
 * table lock to synchronize the sleep/wakeups so that we
 * are sure the threads have terminated before we release
 * the data structures they use.  See crypto_finis below
 * for the other half of this song-and-dance.
 */
static void
crypto_terminate(struct proc **pp, void *q)
{
	struct proc *p;

	mtx_assert(&crypto_drivers_mtx, MA_OWNED);
	p = *pp;
	*pp = NULL;
	if (p) {
		wakeup_one(q);
		PROC_LOCK(p);		/* NB: ensure we don't miss wakeup */
		CRYPTO_DRIVER_UNLOCK();	/* let crypto_finis progress */
		msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
		PROC_UNLOCK(p);
		CRYPTO_DRIVER_LOCK();
	}
}

static void
hmac_init_pad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx, uint8_t padval)
{
	uint8_t hmac_key[HMAC_MAX_BLOCK_LEN];
	u_int i;

	KASSERT(axf->blocksize <= sizeof(hmac_key),
	    ("Invalid HMAC block size %d", axf->blocksize));

	/*
	 * If the key is larger than the block size, use the digest of
	 * the key as the key instead.
	 */
	memset(hmac_key, 0, sizeof(hmac_key));
	if (klen > axf->blocksize) {
		axf->Init(auth_ctx);
		axf->Update(auth_ctx, key, klen);
		axf->Final(hmac_key, auth_ctx);
		klen = axf->hashsize;
	} else
		memcpy(hmac_key, key, klen);

	for (i = 0; i < axf->blocksize; i++)
		hmac_key[i] ^= padval;

	axf->Init(auth_ctx);
	axf->Update(auth_ctx, hmac_key, axf->blocksize);
	explicit_bzero(hmac_key, sizeof(hmac_key));
}

void
hmac_init_ipad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx)
{

	hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL);
}

void
hmac_init_opad(const struct auth_hash *axf, const char *key, int klen,
    void *auth_ctx)
{

	hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL);
}
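
/*
 * Illustrative sketch (not part of the original file): a driver
 * typically precomputes the two padded states once per session and
 * clones them per request, computing
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * Assuming hypothetical per-session context fields ictx/octx of the
 * hash's context type:
 *
 *	hmac_init_ipad(axf, key, klen, &ses->ictx);
 *	hmac_init_opad(axf, key, klen, &ses->octx);
 *	... then, per request ...
 *	ctx = ses->ictx;			(struct copy)
 *	axf->Update(&ctx, data, datalen);
 *	axf->Final(digest, &ctx);
 *	ctx = ses->octx;
 *	axf->Update(&ctx, digest, axf->hashsize);
 *	axf->Final(digest, &ctx);
 */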

static void
crypto_destroy(void)
{
	struct crypto_ret_worker *ret_worker;
	int i;

	/*
	 * Terminate any crypto threads.
	 */
	if (crypto_tq != NULL)
		taskqueue_drain_all(crypto_tq);
	CRYPTO_DRIVER_LOCK();
	crypto_terminate(&cryptoproc, &crp_q);
	FOREACH_CRYPTO_RETW(ret_worker)
		crypto_terminate(&ret_worker->cryptoretproc,
		    &ret_worker->crp_ret_q);
	CRYPTO_DRIVER_UNLOCK();

	/* XXX flush queues??? */

	/*
	 * Reclaim dynamically allocated resources.
	 */
	for (i = 0; i < crypto_drivers_size; i++) {
		if (crypto_drivers[i] != NULL)
			cap_rele(crypto_drivers[i]);
	}
	free(crypto_drivers, M_CRYPTO_DATA);

	if (cryptop_zone != NULL)
		uma_zdestroy(cryptop_zone);
	mtx_destroy(&crypto_q_mtx);
	FOREACH_CRYPTO_RETW(ret_worker)
		mtx_destroy(&ret_worker->crypto_ret_mtx);
	free(crypto_ret_workers, M_CRYPTO_DATA);
	if (crypto_tq != NULL)
		taskqueue_free(crypto_tq);
	mtx_destroy(&crypto_drivers_mtx);
}

uint32_t
crypto_ses2hid(crypto_session_t crypto_session)
{
	return (crypto_session->cap->cc_hid);
}

uint32_t
crypto_ses2caps(crypto_session_t crypto_session)
{
	return (crypto_session->cap->cc_flags & 0xff000000);
}

void *
crypto_get_driver_session(crypto_session_t crypto_session)
{
	return (crypto_session + 1);
}

const struct crypto_session_params *
crypto_get_params(crypto_session_t crypto_session)
{
	return (&crypto_session->csp);
}

struct auth_hash *
crypto_auth_hash(const struct crypto_session_params *csp)
{

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
		return (&auth_hash_hmac_sha1);
	case CRYPTO_SHA2_224_HMAC:
		return (&auth_hash_hmac_sha2_224);
	case CRYPTO_SHA2_256_HMAC:
		return (&auth_hash_hmac_sha2_256);
	case CRYPTO_SHA2_384_HMAC:
		return (&auth_hash_hmac_sha2_384);
	case CRYPTO_SHA2_512_HMAC:
		return (&auth_hash_hmac_sha2_512);
	case CRYPTO_NULL_HMAC:
		return (&auth_hash_null);
	case CRYPTO_RIPEMD160_HMAC:
		return (&auth_hash_hmac_ripemd_160);
	case CRYPTO_SHA1:
		return (&auth_hash_sha1);
	case CRYPTO_SHA2_224:
		return (&auth_hash_sha2_224);
	case CRYPTO_SHA2_256:
		return (&auth_hash_sha2_256);
	case CRYPTO_SHA2_384:
		return (&auth_hash_sha2_384);
	case CRYPTO_SHA2_512:
		return (&auth_hash_sha2_512);
	case CRYPTO_AES_NIST_GMAC:
		switch (csp->csp_auth_klen) {
		case 128 / 8:
			return (&auth_hash_nist_gmac_aes_128);
		case 192 / 8:
			return (&auth_hash_nist_gmac_aes_192);
		case 256 / 8:
			return (&auth_hash_nist_gmac_aes_256);
		default:
			return (NULL);
		}
	case CRYPTO_BLAKE2B:
		return (&auth_hash_blake2b);
	case CRYPTO_BLAKE2S:
		return (&auth_hash_blake2s);
	case CRYPTO_POLY1305:
		return (&auth_hash_poly1305);
	case CRYPTO_AES_CCM_CBC_MAC:
		switch (csp->csp_auth_klen) {
		case 128 / 8:
			return (&auth_hash_ccm_cbc_mac_128);
		case 192 / 8:
			return (&auth_hash_ccm_cbc_mac_192);
		case 256 / 8:
			return (&auth_hash_ccm_cbc_mac_256);
		default:
			return (NULL);
		}
	default:
		return (NULL);
	}
}

struct enc_xform *
crypto_cipher(const struct crypto_session_params *csp)
{

	switch (csp->csp_cipher_alg) {
	case CRYPTO_RIJNDAEL128_CBC:
		return (&enc_xform_rijndael128);
	case CRYPTO_AES_XTS:
		return (&enc_xform_aes_xts);
	case CRYPTO_AES_ICM:
		return (&enc_xform_aes_icm);
	case CRYPTO_AES_NIST_GCM_16:
		return (&enc_xform_aes_nist_gcm);
	case CRYPTO_CAMELLIA_CBC:
		return (&enc_xform_camellia);
	case CRYPTO_NULL_CBC:
		return (&enc_xform_null);
	case CRYPTO_CHACHA20:
		return (&enc_xform_chacha20);
	case CRYPTO_AES_CCM_16:
		return (&enc_xform_ccm);
	default:
		return (NULL);
	}
}

static struct cryptocap *
crypto_checkdriver(uint32_t hid)
{

	return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]);
}

/*
 * Select a driver for a new session that supports the specified
 * algorithms and, optionally, is constrained according to the flags.
 */
static struct cryptocap *
crypto_select_driver(const struct crypto_session_params *csp, int flags)
{
	struct cryptocap *cap, *best;
	int best_match, error, hid;

	CRYPTO_DRIVER_ASSERT();

	best = NULL;
	for (hid = 0; hid < crypto_drivers_size; hid++) {
		/*
		 * If there is no driver for this slot, or the driver
		 * is not appropriate (hardware or software based on
		 * match), then skip.
		 */
		cap = crypto_drivers[hid];
		if (cap == NULL ||
		    (cap->cc_flags & flags) == 0)
			continue;

		error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp);
		if (error >= 0)
			continue;

		/*
		 * Use the driver with the highest probe value.
		 * Hardware drivers use a higher probe value than
		 * software.  In case of a tie, prefer the driver with
		 * the fewest active sessions.
		 */
		if (best == NULL || error > best_match ||
		    (error == best_match &&
		    cap->cc_sessions < best->cc_sessions)) {
			best = cap;
			best_match = error;
		}
	}
	return best;
}
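
/*
 * Illustrative sketch (not part of the original file): probesession
 * methods return a negative priority on success and an errno value on
 * failure, which is why "error >= 0" skips a driver above.  A minimal
 * driver method, assuming the standard probe constants from
 * cryptodev.h:
 *
 *	static int
 *	foo_probesession(device_t dev,
 *	    const struct crypto_session_params *csp)
 *	{
 *		if (csp->csp_cipher_alg != CRYPTO_AES_CBC)
 *			return (EINVAL);
 *		return (CRYPTODEV_PROBE_HARDWARE);
 *	}
 *
 * Since CRYPTODEV_PROBE_HARDWARE is a smaller-magnitude negative value
 * than CRYPTODEV_PROBE_SOFTWARE, hardware drivers win the "highest
 * probe value" comparison above.
 */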

static enum alg_type {
	ALG_NONE = 0,
	ALG_CIPHER,
	ALG_DIGEST,
	ALG_KEYED_DIGEST,
	ALG_COMPRESSION,
	ALG_AEAD
} alg_types[] = {
	[CRYPTO_SHA1_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_RIPEMD160_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CBC] = ALG_CIPHER,
	[CRYPTO_SHA1] = ALG_DIGEST,
	[CRYPTO_NULL_HMAC] = ALG_DIGEST,
	[CRYPTO_NULL_CBC] = ALG_CIPHER,
	[CRYPTO_DEFLATE_COMP] = ALG_COMPRESSION,
	[CRYPTO_SHA2_256_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_SHA2_384_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_SHA2_512_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_CAMELLIA_CBC] = ALG_CIPHER,
	[CRYPTO_AES_XTS] = ALG_CIPHER,
	[CRYPTO_AES_ICM] = ALG_CIPHER,
	[CRYPTO_AES_NIST_GMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_NIST_GCM_16] = ALG_AEAD,
	[CRYPTO_BLAKE2B] = ALG_KEYED_DIGEST,
	[CRYPTO_BLAKE2S] = ALG_KEYED_DIGEST,
	[CRYPTO_CHACHA20] = ALG_CIPHER,
	[CRYPTO_SHA2_224_HMAC] = ALG_KEYED_DIGEST,
	[CRYPTO_RIPEMD160] = ALG_DIGEST,
	[CRYPTO_SHA2_224] = ALG_DIGEST,
	[CRYPTO_SHA2_256] = ALG_DIGEST,
	[CRYPTO_SHA2_384] = ALG_DIGEST,
	[CRYPTO_SHA2_512] = ALG_DIGEST,
	[CRYPTO_POLY1305] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CCM_CBC_MAC] = ALG_KEYED_DIGEST,
	[CRYPTO_AES_CCM_16] = ALG_AEAD,
};

static enum alg_type
alg_type(int alg)
{

	if (alg < nitems(alg_types))
		return (alg_types[alg]);
	return (ALG_NONE);
}

static bool
alg_is_compression(int alg)
{

	return (alg_type(alg) == ALG_COMPRESSION);
}

static bool
alg_is_cipher(int alg)
{

	return (alg_type(alg) == ALG_CIPHER);
}

static bool
alg_is_digest(int alg)
{

	return (alg_type(alg) == ALG_DIGEST ||
	    alg_type(alg) == ALG_KEYED_DIGEST);
}

static bool
alg_is_keyed_digest(int alg)
{

	return (alg_type(alg) == ALG_KEYED_DIGEST);
}

static bool
alg_is_aead(int alg)
{

	return (alg_type(alg) == ALG_AEAD);
}

#define	SUPPORTED_SES (CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD | CSP_F_ESN)

/* Various sanity checks on crypto session parameters. */
static bool
check_csp(const struct crypto_session_params *csp)
{
	struct auth_hash *axf;

	/* Mode-independent checks. */
	if ((csp->csp_flags & ~(SUPPORTED_SES)) != 0)
		return (false);
	if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 ||
	    csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0)
		return (false);
	if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0)
		return (false);
	if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0)
		return (false);

	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		if (!alg_is_compression(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT)
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);
		if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 ||
		    csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
		    csp->csp_auth_mlen != 0)
			return (false);
		break;
	case CSP_MODE_CIPHER:
		if (!alg_is_cipher(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);
		if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
			if (csp->csp_cipher_klen == 0)
				return (false);
			if (csp->csp_ivlen == 0)
				return (false);
		}
		if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
		    csp->csp_auth_mlen != 0)
			return (false);
		break;
	case CSP_MODE_DIGEST:
		if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0)
			return (false);

		if (csp->csp_flags & CSP_F_SEPARATE_AAD)
			return (false);

		/* IV is optional for digests (e.g. GMAC). */
		if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (!alg_is_digest(csp->csp_auth_alg))
			return (false);

		/* Key is optional for BLAKE2 digests. */
		if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
		    csp->csp_auth_alg == CRYPTO_BLAKE2S)
			;
		else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
			if (csp->csp_auth_klen == 0)
				return (false);
		} else {
			if (csp->csp_auth_klen != 0)
				return (false);
		}
		if (csp->csp_auth_mlen != 0) {
			axf = crypto_auth_hash(csp);
			if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
				return (false);
		}
		break;
	case CSP_MODE_AEAD:
		if (!alg_is_aead(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_cipher_klen == 0)
			return (false);
		if (csp->csp_ivlen == 0 ||
		    csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0)
			return (false);

		/*
		 * XXX: Would be nice to have a better way to get this
		 * value.
		 */
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			if (csp->csp_auth_mlen > 16)
				return (false);
			break;
		}
		break;
	case CSP_MODE_ETA:
		if (!alg_is_cipher(csp->csp_cipher_alg))
			return (false);
		if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
			if (csp->csp_cipher_klen == 0)
				return (false);
			if (csp->csp_ivlen == 0)
				return (false);
		}
		if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
			return (false);
		if (!alg_is_digest(csp->csp_auth_alg))
			return (false);

		/* Key is optional for BLAKE2 digests. */
		if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
		    csp->csp_auth_alg == CRYPTO_BLAKE2S)
			;
		else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
			if (csp->csp_auth_klen == 0)
				return (false);
		} else {
			if (csp->csp_auth_klen != 0)
				return (false);
		}
		if (csp->csp_auth_mlen != 0) {
			axf = crypto_auth_hash(csp);
			if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
				return (false);
		}
		break;
	default:
		return (false);
	}

	return (true);
}

/*
 * Delete a session after it has been detached from its driver.
 */
static void
crypto_deletesession(crypto_session_t cses)
{
	struct cryptocap *cap;

	cap = cses->cap;

	zfree(cses, M_CRYPTO_DATA);

	CRYPTO_DRIVER_LOCK();
	cap->cc_sessions--;
	if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP)
		wakeup(cap);
	CRYPTO_DRIVER_UNLOCK();
	cap_rele(cap);
}

/*
 * Create a new session.  The crid argument specifies a crypto
 * driver to use or constraints on a driver to select (hardware
 * only, software only, either).  Whatever driver is selected
 * must be capable of the requested crypto algorithms.
 */
int
crypto_newsession(crypto_session_t *cses,
    const struct crypto_session_params *csp, int crid)
{
	static uint64_t sessid = 0;
	crypto_session_t res;
	struct cryptocap *cap;
	int err;

	if (!check_csp(csp))
		return (EINVAL);

	res = NULL;

	CRYPTO_DRIVER_LOCK();
	if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		/*
		 * Use specified driver; verify it is capable.
		 */
		cap = crypto_checkdriver(crid);
		if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0)
			cap = NULL;
	} else {
		/*
		 * No requested driver; select based on crid flags.
		 */
		cap = crypto_select_driver(csp, crid);
	}
	if (cap == NULL) {
		CRYPTO_DRIVER_UNLOCK();
		CRYPTDEB("no driver");
		return (EOPNOTSUPP);
	}
	cap_ref(cap);
	cap->cc_sessions++;
	CRYPTO_DRIVER_UNLOCK();

	/* Allocate a single block for the generic session and driver softc. */
	res = malloc(sizeof(*res) + cap->cc_session_size, M_CRYPTO_DATA,
	    M_WAITOK | M_ZERO);
	res->cap = cap;
	res->csp = *csp;
	res->id = atomic_fetchadd_64(&sessid, 1);

	/* Call the driver initialization routine. */
	err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp);
	if (err != 0) {
		CRYPTDEB("dev newsession failed: %d", err);
		crypto_deletesession(res);
		return (err);
	}

	*cses = res;
	return (0);
}

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
void
crypto_freesession(crypto_session_t cses)
{
	struct cryptocap *cap;

	if (cses == NULL)
		return;

	cap = cses->cap;

	/* Call the driver cleanup routine, if available. */
	CRYPTODEV_FREESESSION(cap->cc_dev, cses);

	crypto_deletesession(cses);
}
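
/*
 * Illustrative sketch (not part of the original file): a typical
 * in-kernel consumer fills out a crypto_session_params and lets the
 * framework pick a driver.  The field values here are examples only:
 *
 *	struct crypto_session_params csp;
 *	crypto_session_t ses;
 *	int error;
 *
 *	memset(&csp, 0, sizeof(csp));
 *	csp.csp_mode = CSP_MODE_CIPHER;
 *	csp.csp_cipher_alg = CRYPTO_AES_CBC;
 *	csp.csp_cipher_klen = 32;		(AES-256)
 *	csp.csp_ivlen = 16;
 *	csp.csp_cipher_key = key;
 *
 *	error = crypto_newsession(&ses, &csp,
 *	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
 *	... submit requests against ses ...
 *	crypto_freesession(ses);
 */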

/*
 * Return a new driver id.  Registers a driver with the system so that
 * it can be probed by subsequent sessions.
 */
int32_t
crypto_get_driverid(device_t dev, size_t sessionsize, int flags)
{
	struct cryptocap *cap, **newdrv;
	int i;

	if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
		device_printf(dev,
		    "no flags specified when registering driver\n");
		return -1;
	}

	cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
	cap->cc_dev = dev;
	cap->cc_session_size = sessionsize;
	cap->cc_flags = flags;
	refcount_init(&cap->cc_refs, 1);

	CRYPTO_DRIVER_LOCK();
	for (;;) {
		for (i = 0; i < crypto_drivers_size; i++) {
			if (crypto_drivers[i] == NULL)
				break;
		}

		if (i < crypto_drivers_size)
			break;

		/* Out of entries, allocate some more. */

		if (2 * crypto_drivers_size <= crypto_drivers_size) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: driver count wraparound!\n");
			cap_rele(cap);
			return (-1);
		}
		CRYPTO_DRIVER_UNLOCK();

		newdrv = malloc(2 * crypto_drivers_size *
		    sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO);

		CRYPTO_DRIVER_LOCK();
		memcpy(newdrv, crypto_drivers,
		    crypto_drivers_size * sizeof(*crypto_drivers));

		crypto_drivers_size *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	cap->cc_hid = i;
	crypto_drivers[i] = cap;
	CRYPTO_DRIVER_UNLOCK();

	if (bootverbose)
		printf("crypto: assign %s driver id %u, flags 0x%x\n",
		    device_get_nameunit(dev), i, flags);

	return i;
}

/*
 * Lookup a driver by name.  We match against the full device
 * name and unit, and against just the name.  The latter gives
 * us a simple wildcarding by device name.  On success return the
 * driver/hardware identifier; otherwise return -1.
 */
int
crypto_find_driver(const char *match)
{
	struct cryptocap *cap;
	int i, len = strlen(match);

	CRYPTO_DRIVER_LOCK();
	for (i = 0; i < crypto_drivers_size; i++) {
		if (crypto_drivers[i] == NULL)
			continue;
		cap = crypto_drivers[i];
		if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 ||
		    strncmp(match, device_get_name(cap->cc_dev), len) == 0) {
			CRYPTO_DRIVER_UNLOCK();
			return (i);
		}
	}
	CRYPTO_DRIVER_UNLOCK();
	return (-1);
}

/*
 * Return the device_t for the specified driver or NULL
 * if the driver identifier is invalid.
 */
device_t
crypto_find_device_byhid(int hid)
{
	struct cryptocap *cap;
	device_t dev;

	dev = NULL;
	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(hid);
	if (cap != NULL)
		dev = cap->cc_dev;
	CRYPTO_DRIVER_UNLOCK();
	return (dev);
}

/*
 * Return the device/driver capabilities.
 */
int
crypto_getcaps(int hid)
{
	struct cryptocap *cap;
	int flags;

	flags = 0;
	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(hid);
	if (cap != NULL)
		flags = cap->cc_flags;
	CRYPTO_DRIVER_UNLOCK();
	return (flags);
}
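
/*
 * Illustrative note (not part of the original file): because the
 * comparison uses strncmp() with the caller's string length against
 * both device_get_nameunit() and device_get_name(), an exact unit and
 * a bare driver name both match, e.g. (driver name as an example):
 *
 *	crypto_find_driver("aesni0")	matches only unit 0
 *	crypto_find_driver("aesni")	matches the first aesni unit found
 */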

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
 */
int
crypto_kregister(uint32_t driverid, int kalg, uint32_t flags)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			printf("crypto: %s registers key alg %u flags %u\n"
				, device_get_nameunit(cap->cc_dev)
				, kalg
				, flags
			);
		gone_in_dev(cap->cc_dev, 14, "asymmetric crypto");
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(uint32_t driverid)
{
	struct cryptocap *cap;

	CRYPTO_DRIVER_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap == NULL) {
		CRYPTO_DRIVER_UNLOCK();
		return (EINVAL);
	}

	cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
	crypto_drivers[driverid] = NULL;

	/*
	 * XXX: This doesn't do anything to kick sessions that
	 * have no pending operations.
	 */
	while (cap->cc_sessions != 0 || cap->cc_koperations != 0)
		mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0);
	CRYPTO_DRIVER_UNLOCK();
	cap_rele(cap);

	return (0);
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(uint32_t driverid, int what)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_Q_LOCK();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		if (what & CRYPTO_SYMQ)
			cap->cc_qblocked = 0;
		if (what & CRYPTO_ASYMQ)
			cap->cc_kqblocked = 0;
		if (crp_sleep)
			wakeup_one(&crp_q);
		err = 0;
	} else
		err = EINVAL;
	CRYPTO_Q_UNLOCK();

	return err;
}

size_t
crypto_buffer_len(struct crypto_buffer *cb)
{
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		return (cb->cb_buf_len);
	case CRYPTO_BUF_MBUF:
		if (cb->cb_mbuf->m_flags & M_PKTHDR)
			return (cb->cb_mbuf->m_pkthdr.len);
		return (m_length(cb->cb_mbuf, NULL));
	case CRYPTO_BUF_VMPAGE:
		return (cb->cb_vm_page_len);
	case CRYPTO_BUF_UIO:
		return (cb->cb_uio->uio_resid);
	default:
		return (0);
	}
}

#ifdef INVARIANTS
/* Various sanity checks on crypto requests. */
static void
cb_sanity(struct crypto_buffer *cb, const char *name)
{
	KASSERT(cb->cb_type > CRYPTO_BUF_NONE && cb->cb_type <= CRYPTO_BUF_LAST,
	    ("incoming crp with invalid %s buffer type", name));
	switch (cb->cb_type) {
	case CRYPTO_BUF_CONTIG:
		KASSERT(cb->cb_buf_len >= 0,
		    ("incoming crp with -ve %s buffer length", name));
		break;
	case CRYPTO_BUF_VMPAGE:
		KASSERT(CRYPTO_HAS_VMPAGE,
		    ("incoming crp uses dmap on unsupported arch"));
		KASSERT(cb->cb_vm_page_len >= 0,
		    ("incoming crp with -ve %s buffer length", name));
		KASSERT(cb->cb_vm_page_offset >= 0,
		    ("incoming crp with -ve %s buffer offset", name));
		KASSERT(cb->cb_vm_page_offset < PAGE_SIZE,
		    ("incoming crp with %s buffer offset greater than page size"
		    , name));
		break;
	default:
		break;
	}
}

static void
crp_sanity(struct cryptop *crp)
{
	struct crypto_session_params *csp;
	struct crypto_buffer *out;
	size_t ilen, len, olen;

	KASSERT(crp->crp_session != NULL, ("incoming crp without a session"));
	KASSERT(crp->crp_obuf.cb_type >= CRYPTO_BUF_NONE &&
	    crp->crp_obuf.cb_type <= CRYPTO_BUF_LAST,
	    ("incoming crp with invalid output buffer type"));
	KASSERT(crp->crp_etype == 0, ("incoming crp with error"));
	KASSERT(!(crp->crp_flags & CRYPTO_F_DONE),
	    ("incoming crp already done"));

	csp = &crp->crp_session->csp;
	cb_sanity(&crp->crp_buf, "input");
	ilen = crypto_buffer_len(&crp->crp_buf);
	olen = ilen;
	out = NULL;
	if (csp->csp_flags & CSP_F_SEPARATE_OUTPUT) {
		if (crp->crp_obuf.cb_type != CRYPTO_BUF_NONE) {
			cb_sanity(&crp->crp_obuf, "output");
			out = &crp->crp_obuf;
			olen = crypto_buffer_len(out);
		}
	} else
		KASSERT(crp->crp_obuf.cb_type == CRYPTO_BUF_NONE,
		    ("incoming crp with separate output buffer "
		    "but no session support"));

	switch (csp->csp_mode) {
	case CSP_MODE_COMPRESS:
		KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS ||
		    crp->crp_op == CRYPTO_OP_DECOMPRESS,
		    ("invalid compression op %x", crp->crp_op));
		break;
	case CSP_MODE_CIPHER:
		KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT ||
		    crp->crp_op == CRYPTO_OP_DECRYPT,
		    ("invalid cipher op %x", crp->crp_op));
		break;
	case CSP_MODE_DIGEST:
		KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST ||
		    crp->crp_op == CRYPTO_OP_VERIFY_DIGEST,
		    ("invalid digest op %x", crp->crp_op));
		break;
	case CSP_MODE_AEAD:
		KASSERT(crp->crp_op ==
		    (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
		    crp->crp_op ==
		    (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
		    ("invalid AEAD op %x", crp->crp_op));
		if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16)
			KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE,
			    ("GCM without a separate IV"));
		if (csp->csp_cipher_alg == CRYPTO_AES_CCM_16)
			KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE,
			    ("CCM without a separate IV"));
		break;
	case CSP_MODE_ETA:
		KASSERT(crp->crp_op ==
		    (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
		    crp->crp_op ==
		    (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
		    ("invalid ETA op %x", crp->crp_op));
		break;
	}
	if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
		if (crp->crp_aad == NULL) {
			KASSERT(crp->crp_aad_start == 0 ||
			    crp->crp_aad_start < ilen,
			    ("invalid AAD start"));
			KASSERT(crp->crp_aad_length != 0 ||
			    crp->crp_aad_start == 0,
			    ("AAD with zero length and non-zero start"));
			KASSERT(crp->crp_aad_length == 0 ||
			    crp->crp_aad_start + crp->crp_aad_length <= ilen,
			    ("AAD outside input length"));
		} else {
			KASSERT(csp->csp_flags & CSP_F_SEPARATE_AAD,
			    ("session doesn't support separate AAD buffer"));
			KASSERT(crp->crp_aad_start == 0,
			    ("separate AAD buffer with non-zero AAD start"));
			KASSERT(crp->crp_aad_length != 0,
			    ("separate AAD buffer with zero length"));
		}
	} else {
		KASSERT(crp->crp_aad == NULL && crp->crp_aad_start == 0 &&
		    crp->crp_aad_length == 0,
		    ("AAD region in request not supporting AAD"));
	}
	if (csp->csp_ivlen == 0) {
		KASSERT((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0,
		    ("IV_SEPARATE set when IV isn't used"));
		KASSERT(crp->crp_iv_start == 0,
		    ("crp_iv_start set when IV isn't used"));
	} else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
		KASSERT(crp->crp_iv_start == 0,
		    ("IV_SEPARATE used with non-zero IV start"));
	} else {
		KASSERT(crp->crp_iv_start < ilen,
		    ("invalid IV start"));
		KASSERT(crp->crp_iv_start + csp->csp_ivlen <= ilen,
		    ("IV outside buffer length"));
	}
	/* XXX: payload_start of 0 should always be < ilen? */
	KASSERT(crp->crp_payload_start == 0 ||
	    crp->crp_payload_start < ilen,
	    ("invalid payload start"));
	KASSERT(crp->crp_payload_start + crp->crp_payload_length <=
	    ilen, ("payload outside input buffer"));
	if (out == NULL) {
		KASSERT(crp->crp_payload_output_start == 0,
		    ("payload output start non-zero without output buffer"));
	} else {
		KASSERT(crp->crp_payload_output_start < olen,
		    ("invalid payload output start"));
		KASSERT(crp->crp_payload_output_start +
		    crp->crp_payload_length <= olen,
		    ("payload outside output buffer"));
	}
	if (csp->csp_mode == CSP_MODE_DIGEST ||
	    csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
		if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST)
			len = ilen;
		else
			len = olen;
		KASSERT(crp->crp_digest_start == 0 ||
		    crp->crp_digest_start < len,
		    ("invalid digest start"));
		/* XXX: For the mlen == 0 case this check isn't perfect. */
		KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <= len,
		    ("digest outside buffer"));
	} else {
		KASSERT(crp->crp_digest_start == 0,
		    ("non-zero digest start for request without a digest"));
	}
	if (csp->csp_cipher_klen != 0)
		KASSERT(csp->csp_cipher_key != NULL ||
		    crp->crp_cipher_key != NULL,
		    ("cipher request without a key"));
	if (csp->csp_auth_klen != 0)
		KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL,
		    ("auth request without a key"));
	KASSERT(crp->crp_callback != NULL, ("incoming crp without callback"));
}
#endif

/*
 * Add a crypto request to a queue, to be processed by the kernel thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	struct cryptocap *cap;
	int result;

#ifdef INVARIANTS
	crp_sanity(crp);
#endif

	CRYPTOSTAT_INC(cs_ops);

	crp->crp_retw_id = crp->crp_session->id % crypto_workers_num;

	if (CRYPTOP_ASYNC(crp)) {
		if (crp->crp_flags & CRYPTO_F_ASYNC_KEEPORDER) {
			struct crypto_ret_worker *ret_worker;

			ret_worker = CRYPTO_RETW(crp->crp_retw_id);

			CRYPTO_RETW_LOCK(ret_worker);
			crp->crp_seq = ret_worker->reorder_ops++;
			CRYPTO_RETW_UNLOCK(ret_worker);
		}

		TASK_INIT(&crp->crp_task, 0, crypto_task_invoke, crp);
		taskqueue_enqueue(crypto_tq, &crp->crp_task);
		return (0);
	}

	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
		/*
		 * Caller marked the request to be processed
		 * immediately; dispatch it directly to the
		 * driver unless the driver is currently blocked.
		 */
		cap = crp->crp_session->cap;
		if (!cap->cc_qblocked) {
			result = crypto_invoke(cap, crp, 0);
			if (result != ERESTART)
				return (result);
			/*
			 * The driver ran out of resources, put the request on
			 * the queue.
			 */
		}
	}
	crypto_batch_enqueue(crp);
	return 0;
}

void
crypto_batch_enqueue(struct cryptop *crp)
{

	CRYPTO_Q_LOCK();
	TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
	if (crp_sleep)
		wakeup_one(&crp_q);
	CRYPTO_Q_UNLOCK();
}

/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	int error;

	CRYPTOSTAT_INC(cs_kops);

	krp->krp_cap = NULL;
	error = crypto_kinvoke(krp);
	if (error == ERESTART) {
		CRYPTO_Q_LOCK();
		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
		if (crp_sleep)
			wakeup_one(&crp_q);
		CRYPTO_Q_UNLOCK();
		error = 0;
	}
	return error;
}
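
/*
 * Illustrative sketch (not part of the original file): submitting a
 * symmetric request against an existing session.  Error handling and
 * buffer setup are elided; "done_cb" is a hypothetical callback that
 * inspects crp->crp_etype and calls crypto_freereq().
 *
 *	struct cryptop *crp;
 *
 *	crp = crypto_getreq(ses, M_WAITOK);
 *	crp->crp_op = CRYPTO_OP_ENCRYPT;
 *	crp->crp_flags = CRYPTO_F_CBIFSYNC;
 *	crypto_use_mbuf(crp, m);		(see cryptodev.h)
 *	crp->crp_payload_start = 0;
 *	crp->crp_payload_length = len;
 *	crp->crp_callback = done_cb;
 *	error = crypto_dispatch(crp);
 *
 * Completion is reported through the callback; a request finishing
 * with crp_etype == EAGAIN may simply be resubmitted (see
 * crypto_invoke() below).
 */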
1539 */ 1540 cap = crypto_drivers[hid]; 1541 if (cap == NULL || 1542 (cap->cc_flags & match) == 0) 1543 continue; 1544 1545 /* verify all the algorithms are supported. */ 1546 if (kdriver_suitable(cap, krp)) { 1547 if (best == NULL || 1548 cap->cc_koperations < best->cc_koperations) 1549 best = cap; 1550 } 1551 } 1552 if (best != NULL) 1553 return best; 1554 if (match == CRYPTOCAP_F_HARDWARE && (flags & CRYPTOCAP_F_SOFTWARE)) { 1555 /* sort of an Algol 68-style for loop */ 1556 match = CRYPTOCAP_F_SOFTWARE; 1557 goto again; 1558 } 1559 return best; 1560 } 1561 1562 /* 1563 * Choose a driver for an asymmetric crypto request. 1564 */ 1565 static struct cryptocap * 1566 crypto_lookup_kdriver(struct cryptkop *krp) 1567 { 1568 struct cryptocap *cap; 1569 uint32_t crid; 1570 1571 /* If this request is requeued, it might already have a driver. */ 1572 cap = krp->krp_cap; 1573 if (cap != NULL) 1574 return (cap); 1575 1576 /* Use krp_crid to choose a driver. */ 1577 crid = krp->krp_crid; 1578 if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) { 1579 cap = crypto_checkdriver(crid); 1580 if (cap != NULL) { 1581 /* 1582 * Driver present, it must support the 1583 * necessary algorithm and, if s/w drivers are 1584 * excluded, it must be registered as 1585 * hardware-backed. 1586 */ 1587 if (!kdriver_suitable(cap, krp) || 1588 (!crypto_devallowsoft && 1589 (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0)) 1590 cap = NULL; 1591 } 1592 } else { 1593 /* 1594 * No requested driver; select based on crid flags. 1595 */ 1596 if (!crypto_devallowsoft) /* NB: disallow s/w drivers */ 1597 crid &= ~CRYPTOCAP_F_SOFTWARE; 1598 cap = crypto_select_kdriver(krp, crid); 1599 } 1600 1601 if (cap != NULL) { 1602 krp->krp_cap = cap_ref(cap); 1603 krp->krp_hid = cap->cc_hid; 1604 } 1605 return (cap); 1606 } 1607 1608 /* 1609 * Dispatch an asymmetric crypto request. 1610 */ 1611 static int 1612 crypto_kinvoke(struct cryptkop *krp) 1613 { 1614 struct cryptocap *cap = NULL; 1615 int error; 1616 1617 KASSERT(krp != NULL, ("%s: krp == NULL", __func__)); 1618 KASSERT(krp->krp_callback != NULL, 1619 ("%s: krp->crp_callback == NULL", __func__)); 1620 1621 CRYPTO_DRIVER_LOCK(); 1622 cap = crypto_lookup_kdriver(krp); 1623 if (cap == NULL) { 1624 CRYPTO_DRIVER_UNLOCK(); 1625 krp->krp_status = ENODEV; 1626 crypto_kdone(krp); 1627 return (0); 1628 } 1629 1630 /* 1631 * If the device is blocked, return ERESTART to requeue it. 1632 */ 1633 if (cap->cc_kqblocked) { 1634 /* 1635 * XXX: Previously this set krp_status to ERESTART and 1636 * invoked crypto_kdone but the caller would still 1637 * requeue it. 1638 */ 1639 CRYPTO_DRIVER_UNLOCK(); 1640 return (ERESTART); 1641 } 1642 1643 cap->cc_koperations++; 1644 CRYPTO_DRIVER_UNLOCK(); 1645 error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0); 1646 if (error == ERESTART) { 1647 CRYPTO_DRIVER_LOCK(); 1648 cap->cc_koperations--; 1649 CRYPTO_DRIVER_UNLOCK(); 1650 return (error); 1651 } 1652 1653 KASSERT(error == 0, ("error %d returned from crypto_kprocess", error)); 1654 return (0); 1655 } 1656 1657 static void 1658 crypto_task_invoke(void *ctx, int pending) 1659 { 1660 struct cryptocap *cap; 1661 struct cryptop *crp; 1662 int result; 1663 1664 crp = (struct cryptop *)ctx; 1665 cap = crp->crp_session->cap; 1666 result = crypto_invoke(cap, crp, 0); 1667 if (result == ERESTART) 1668 crypto_batch_enqueue(crp); 1669 } 1670 1671 /* 1672 * Dispatch a crypto request to the appropriate crypto devices. 

/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
{

	KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
	KASSERT(crp->crp_callback != NULL,
	    ("%s: crp->crp_callback == NULL", __func__));
	KASSERT(crp->crp_session != NULL,
	    ("%s: crp->crp_session == NULL", __func__));

	if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
		struct crypto_session_params csp;
		crypto_session_t nses;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 *
		 * XXX: What if there are more already queued requests for this
		 * session?
		 *
		 * XXX: Real solution is to make sessions refcounted
		 * and force callers to hold a reference when
		 * assigning to crp_session.  Could maybe change
		 * crypto_getreq to accept a session pointer to make
		 * that work.  Alternatively, we could abandon the
		 * notion of rewriting crp_session in requests forcing
		 * the caller to deal with allocating a new session.
		 * Perhaps provide a method to allow a crp's session to
		 * be swapped that callers could use.
		 */
		csp = crp->crp_session->csp;
		crypto_freesession(crp->crp_session);

		/*
		 * XXX: Key pointers may no longer be valid.  If we
		 * really want to support this we need to define the
		 * KPI such that 'csp' is required to be valid for the
		 * duration of a session by the caller perhaps.
		 *
		 * XXX: If the keys have been changed this will reuse
		 * the old keys.  This probably suggests making
		 * rekeying more explicit and updating the key
		 * pointers in 'csp' when the keys change.
		 */
		if (crypto_newsession(&nses, &csp,
		    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
			crp->crp_session = nses;

		crp->crp_etype = EAGAIN;
		crypto_done(crp);
		return 0;
	} else {
		/*
		 * Invoke the driver to process the request.
		 */
		return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint);
	}
}
1729 */ 1730 return CRYPTODEV_PROCESS(cap->cc_dev, crp, hint); 1731 } 1732 } 1733 1734 void 1735 crypto_destroyreq(struct cryptop *crp) 1736 { 1737 #ifdef DIAGNOSTIC 1738 { 1739 struct cryptop *crp2; 1740 struct crypto_ret_worker *ret_worker; 1741 1742 CRYPTO_Q_LOCK(); 1743 TAILQ_FOREACH(crp2, &crp_q, crp_next) { 1744 KASSERT(crp2 != crp, 1745 ("Freeing cryptop from the crypto queue (%p).", 1746 crp)); 1747 } 1748 CRYPTO_Q_UNLOCK(); 1749 1750 FOREACH_CRYPTO_RETW(ret_worker) { 1751 CRYPTO_RETW_LOCK(ret_worker); 1752 TAILQ_FOREACH(crp2, &ret_worker->crp_ret_q, crp_next) { 1753 KASSERT(crp2 != crp, 1754 ("Freeing cryptop from the return queue (%p).", 1755 crp)); 1756 } 1757 CRYPTO_RETW_UNLOCK(ret_worker); 1758 } 1759 } 1760 #endif 1761 } 1762 1763 void 1764 crypto_freereq(struct cryptop *crp) 1765 { 1766 if (crp == NULL) 1767 return; 1768 1769 crypto_destroyreq(crp); 1770 uma_zfree(cryptop_zone, crp); 1771 } 1772 1773 static void 1774 _crypto_initreq(struct cryptop *crp, crypto_session_t cses) 1775 { 1776 crp->crp_session = cses; 1777 } 1778 1779 void 1780 crypto_initreq(struct cryptop *crp, crypto_session_t cses) 1781 { 1782 memset(crp, 0, sizeof(*crp)); 1783 _crypto_initreq(crp, cses); 1784 } 1785 1786 struct cryptop * 1787 crypto_getreq(crypto_session_t cses, int how) 1788 { 1789 struct cryptop *crp; 1790 1791 MPASS(how == M_WAITOK || how == M_NOWAIT); 1792 crp = uma_zalloc(cryptop_zone, how | M_ZERO); 1793 if (crp != NULL) 1794 _crypto_initreq(crp, cses); 1795 return (crp); 1796 } 1797 1798 /* 1799 * Invoke the callback on behalf of the driver. 1800 */ 1801 void 1802 crypto_done(struct cryptop *crp) 1803 { 1804 KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0, 1805 ("crypto_done: op already done, flags 0x%x", crp->crp_flags)); 1806 crp->crp_flags |= CRYPTO_F_DONE; 1807 if (crp->crp_etype != 0) 1808 CRYPTOSTAT_INC(cs_errs); 1809 1810 /* 1811 * CBIMM means unconditionally do the callback immediately; 1812 * CBIFSYNC means do the callback immediately only if the 1813 * operation was done synchronously. Both are used to avoid 1814 * doing extraneous context switches; the latter is mostly 1815 * used with the software crypto driver. 1816 */ 1817 if (!CRYPTOP_ASYNC_KEEPORDER(crp) && 1818 ((crp->crp_flags & CRYPTO_F_CBIMM) || 1819 ((crp->crp_flags & CRYPTO_F_CBIFSYNC) && 1820 (crypto_ses2caps(crp->crp_session) & CRYPTOCAP_F_SYNC)))) { 1821 /* 1822 * Do the callback directly. This is ok when the 1823 * callback routine does very little (e.g. the 1824 * /dev/crypto callback method just does a wakeup). 1825 */ 1826 crp->crp_callback(crp); 1827 } else { 1828 struct crypto_ret_worker *ret_worker; 1829 bool wake; 1830 1831 ret_worker = CRYPTO_RETW(crp->crp_retw_id); 1832 wake = false; 1833 1834 /* 1835 * Normal case; queue the callback for the thread. 
1836 */ 1837 CRYPTO_RETW_LOCK(ret_worker); 1838 if (CRYPTOP_ASYNC_KEEPORDER(crp)) { 1839 struct cryptop *tmp; 1840 1841 TAILQ_FOREACH_REVERSE(tmp, &ret_worker->crp_ordered_ret_q, 1842 cryptop_q, crp_next) { 1843 if (CRYPTO_SEQ_GT(crp->crp_seq, tmp->crp_seq)) { 1844 TAILQ_INSERT_AFTER(&ret_worker->crp_ordered_ret_q, 1845 tmp, crp, crp_next); 1846 break; 1847 } 1848 } 1849 if (tmp == NULL) { 1850 TAILQ_INSERT_HEAD(&ret_worker->crp_ordered_ret_q, 1851 crp, crp_next); 1852 } 1853 1854 if (crp->crp_seq == ret_worker->reorder_cur_seq) 1855 wake = true; 1856 } 1857 else { 1858 if (CRYPTO_RETW_EMPTY(ret_worker)) 1859 wake = true; 1860 1861 TAILQ_INSERT_TAIL(&ret_worker->crp_ret_q, crp, crp_next); 1862 } 1863 1864 if (wake) 1865 wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ 1866 CRYPTO_RETW_UNLOCK(ret_worker); 1867 } 1868 } 1869 1870 /* 1871 * Invoke the callback on behalf of the driver. 1872 */ 1873 void 1874 crypto_kdone(struct cryptkop *krp) 1875 { 1876 struct crypto_ret_worker *ret_worker; 1877 struct cryptocap *cap; 1878 1879 if (krp->krp_status != 0) 1880 CRYPTOSTAT_INC(cs_kerrs); 1881 cap = krp->krp_cap; 1882 if (cap != NULL) { 1883 CRYPTO_DRIVER_LOCK(); 1884 KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0")); 1885 cap->cc_koperations--; 1886 if (cap->cc_koperations == 0 && 1887 cap->cc_flags & CRYPTOCAP_F_CLEANUP) 1888 wakeup(cap); 1889 CRYPTO_DRIVER_UNLOCK(); 1890 krp->krp_cap = NULL; 1891 cap_rele(cap); 1892 } 1893 1894 ret_worker = CRYPTO_RETW(0); 1895 1896 CRYPTO_RETW_LOCK(ret_worker); 1897 if (CRYPTO_RETW_EMPTY(ret_worker)) 1898 wakeup_one(&ret_worker->crp_ret_q); /* shared wait channel */ 1899 TAILQ_INSERT_TAIL(&ret_worker->crp_ret_kq, krp, krp_next); 1900 CRYPTO_RETW_UNLOCK(ret_worker); 1901 } 1902 1903 int 1904 crypto_getfeat(int *featp) 1905 { 1906 int hid, kalg, feat = 0; 1907 1908 CRYPTO_DRIVER_LOCK(); 1909 for (hid = 0; hid < crypto_drivers_size; hid++) { 1910 const struct cryptocap *cap = crypto_drivers[hid]; 1911 1912 if (cap == NULL || 1913 ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) && 1914 !crypto_devallowsoft)) { 1915 continue; 1916 } 1917 for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++) 1918 if (cap->cc_kalg[kalg] & CRYPTO_ALG_FLAG_SUPPORTED) 1919 feat |= 1 << kalg; 1920 } 1921 CRYPTO_DRIVER_UNLOCK(); 1922 *featp = feat; 1923 return (0); 1924 } 1925 1926 /* 1927 * Terminate a thread at module unload. The process that 1928 * initiated this is waiting for us to signal that we're gone; 1929 * wake it up and exit. We use the driver table lock to insure 1930 * we don't do the wakeup before they're waiting. There is no 1931 * race here because the waiter sleeps on the proc lock for the 1932 * thread so it gets notified at the right time because of an 1933 * extra wakeup that's done in exit1(). 1934 */ 1935 static void 1936 crypto_finis(void *chan) 1937 { 1938 CRYPTO_DRIVER_LOCK(); 1939 wakeup_one(chan); 1940 CRYPTO_DRIVER_UNLOCK(); 1941 kproc_exit(0); 1942 } 1943 1944 /* 1945 * Crypto thread, dispatches crypto requests. 1946 */ 1947 static void 1948 crypto_proc(void) 1949 { 1950 struct cryptop *crp, *submit; 1951 struct cryptkop *krp; 1952 struct cryptocap *cap; 1953 int result, hint; 1954 1955 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 1956 fpu_kern_thread(FPU_KERN_NORMAL); 1957 #endif 1958 1959 CRYPTO_Q_LOCK(); 1960 for (;;) { 1961 /* 1962 * Find the first element in the queue that can be 1963 * processed and look-ahead to see if multiple ops 1964 * are ready for the same driver. 
1965 */ 1966 submit = NULL; 1967 hint = 0; 1968 TAILQ_FOREACH(crp, &crp_q, crp_next) { 1969 cap = crp->crp_session->cap; 1970 /* 1971 * Driver cannot disappeared when there is an active 1972 * session. 1973 */ 1974 KASSERT(cap != NULL, ("%s:%u Driver disappeared.", 1975 __func__, __LINE__)); 1976 if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { 1977 /* Op needs to be migrated, process it. */ 1978 if (submit == NULL) 1979 submit = crp; 1980 break; 1981 } 1982 if (!cap->cc_qblocked) { 1983 if (submit != NULL) { 1984 /* 1985 * We stop on finding another op, 1986 * regardless whether its for the same 1987 * driver or not. We could keep 1988 * searching the queue but it might be 1989 * better to just use a per-driver 1990 * queue instead. 1991 */ 1992 if (submit->crp_session->cap == cap) 1993 hint = CRYPTO_HINT_MORE; 1994 break; 1995 } else { 1996 submit = crp; 1997 if ((submit->crp_flags & CRYPTO_F_BATCH) == 0) 1998 break; 1999 /* keep scanning for more are q'd */ 2000 } 2001 } 2002 } 2003 if (submit != NULL) { 2004 TAILQ_REMOVE(&crp_q, submit, crp_next); 2005 cap = submit->crp_session->cap; 2006 KASSERT(cap != NULL, ("%s:%u Driver disappeared.", 2007 __func__, __LINE__)); 2008 CRYPTO_Q_UNLOCK(); 2009 result = crypto_invoke(cap, submit, hint); 2010 CRYPTO_Q_LOCK(); 2011 if (result == ERESTART) { 2012 /* 2013 * The driver ran out of resources, mark the 2014 * driver ``blocked'' for cryptop's and put 2015 * the request back in the queue. It would 2016 * best to put the request back where we got 2017 * it but that's hard so for now we put it 2018 * at the front. This should be ok; putting 2019 * it at the end does not work. 2020 */ 2021 cap->cc_qblocked = 1; 2022 TAILQ_INSERT_HEAD(&crp_q, submit, crp_next); 2023 CRYPTOSTAT_INC(cs_blocks); 2024 } 2025 } 2026 2027 /* As above, but for key ops */ 2028 TAILQ_FOREACH(krp, &crp_kq, krp_next) { 2029 cap = krp->krp_cap; 2030 if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) { 2031 /* 2032 * Operation needs to be migrated, 2033 * clear krp_cap so a new driver is 2034 * selected. 2035 */ 2036 krp->krp_cap = NULL; 2037 cap_rele(cap); 2038 break; 2039 } 2040 if (!cap->cc_kqblocked) 2041 break; 2042 } 2043 if (krp != NULL) { 2044 TAILQ_REMOVE(&crp_kq, krp, krp_next); 2045 CRYPTO_Q_UNLOCK(); 2046 result = crypto_kinvoke(krp); 2047 CRYPTO_Q_LOCK(); 2048 if (result == ERESTART) { 2049 /* 2050 * The driver ran out of resources, mark the 2051 * driver ``blocked'' for cryptkop's and put 2052 * the request back in the queue. It would 2053 * best to put the request back where we got 2054 * it but that's hard so for now we put it 2055 * at the front. This should be ok; putting 2056 * it at the end does not work. 2057 */ 2058 krp->krp_cap->cc_kqblocked = 1; 2059 TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next); 2060 CRYPTOSTAT_INC(cs_kblocks); 2061 } 2062 } 2063 2064 if (submit == NULL && krp == NULL) { 2065 /* 2066 * Nothing more to be processed. Sleep until we're 2067 * woken because there are more ops to process. 2068 * This happens either by submission or by a driver 2069 * becoming unblocked and notifying us through 2070 * crypto_unblock. Note that when we wakeup we 2071 * start processing each queue again from the 2072 * front. It's not clear that it's important to 2073 * preserve this ordering since ops may finish 2074 * out of order if dispatched to different devices 2075 * and some become blocked while others do not. 

/*
 * Crypto return thread; runs callbacks for completed crypto requests.
 * Callbacks are done here, rather than in the crypto drivers, because
 * callbacks typically are expensive and would slow interrupt handling.
 */
static void
crypto_ret_proc(struct crypto_ret_worker *ret_worker)
{
	struct cryptop *crpt;
	struct cryptkop *krpt;

	CRYPTO_RETW_LOCK(ret_worker);
	for (;;) {
		/* Harvest return q's for completed ops */
		crpt = TAILQ_FIRST(&ret_worker->crp_ordered_ret_q);
		if (crpt != NULL) {
			if (crpt->crp_seq == ret_worker->reorder_cur_seq) {
				TAILQ_REMOVE(&ret_worker->crp_ordered_ret_q,
				    crpt, crp_next);
				ret_worker->reorder_cur_seq++;
			} else {
				crpt = NULL;
			}
		}

		if (crpt == NULL) {
			crpt = TAILQ_FIRST(&ret_worker->crp_ret_q);
			if (crpt != NULL)
				TAILQ_REMOVE(&ret_worker->crp_ret_q, crpt,
				    crp_next);
		}

		krpt = TAILQ_FIRST(&ret_worker->crp_ret_kq);
		if (krpt != NULL)
			TAILQ_REMOVE(&ret_worker->crp_ret_kq, krpt, krp_next);

		if (crpt != NULL || krpt != NULL) {
			CRYPTO_RETW_UNLOCK(ret_worker);
			/*
			 * Run callbacks unlocked.
			 */
			if (crpt != NULL)
				crpt->crp_callback(crpt);
			if (krpt != NULL)
				krpt->krp_callback(krpt);
			CRYPTO_RETW_LOCK(ret_worker);
		} else {
			/*
			 * Nothing more to be processed.  Sleep until we're
			 * woken because there are more returns to process.
			 */
			msleep(&ret_worker->crp_ret_q,
			    &ret_worker->crypto_ret_mtx, PWAIT,
			    "crypto_ret_wait", 0);
			if (ret_worker->cryptoretproc == NULL)
				break;
			CRYPTOSTAT_INC(cs_rets);
		}
	}
	CRYPTO_RETW_UNLOCK(ret_worker);

	crypto_finis(&ret_worker->crp_ret_q);
}
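
/*
 * Example: requesting in-order completion.  A submitter that must see
 * callbacks in submission order sets CRYPTO_F_ASYNC_KEEPORDER together
 * with CRYPTO_F_ASYNC before dispatch; crypto_done() then threads the
 * request onto crp_ordered_ret_q and crypto_ret_proc() above releases
 * it only once crp_seq matches reorder_cur_seq.  "mycb" is a
 * hypothetical callback; fragment is a sketch only, not compiled.
 */
#if 0
	int error;

	crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER;
	crp->crp_callback = mycb;	/* invoked in crp_seq order */
	error = crypto_dispatch(crp);
#endif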
#ifdef DDB
static void
db_show_drivers(void)
{
	int hid;

	db_printf("%12s %4s %4s %8s %2s %2s\n"
		, "Device"
		, "Ses"
		, "Kops"
		, "Flags"
		, "QB"
		, "KB"
	);
	for (hid = 0; hid < crypto_drivers_size; hid++) {
		const struct cryptocap *cap = crypto_drivers[hid];

		if (cap == NULL)
			continue;
		db_printf("%-12s %4u %4u %08x %2u %2u\n"
			, device_get_nameunit(cap->cc_dev)
			, cap->cc_sessions
			, cap->cc_koperations
			, cap->cc_flags
			, cap->cc_qblocked
			, cap->cc_kqblocked
		);
	}
}

DB_SHOW_COMMAND(crypto, db_show_crypto)
{
	struct cryptop *crp;
	struct crypto_ret_worker *ret_worker;

	db_show_drivers();
	db_printf("\n");

	db_printf("%4s %8s %4s %4s %4s %8s %8s\n",
	    "HID", "Caps", "Olen", "Etype", "Flags",
	    "Device", "Callback");
	TAILQ_FOREACH(crp, &crp_q, crp_next) {
		db_printf("%4u %08x %4u %4u %04x %8s %8p\n"
		    , crp->crp_session->cap->cc_hid
		    , (int) crypto_ses2caps(crp->crp_session)
		    , crp->crp_olen
		    , crp->crp_etype
		    , crp->crp_flags
		    , device_get_nameunit(crp->crp_session->cap->cc_dev)
		    , crp->crp_callback
		);
	}
	FOREACH_CRYPTO_RETW(ret_worker) {
		db_printf("\n%8s %4s %4s %4s %8s\n",
		    "ret_worker", "HID", "Etype", "Flags", "Callback");
		if (!TAILQ_EMPTY(&ret_worker->crp_ret_q)) {
			TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) {
				db_printf("%8td %4u %4u %04x %8p\n"
				    , CRYPTO_RETW_ID(ret_worker)
				    , crp->crp_session->cap->cc_hid
				    , crp->crp_etype
				    , crp->crp_flags
				    , crp->crp_callback
				);
			}
		}
	}
}

DB_SHOW_COMMAND(kcrypto, db_show_kcrypto)
{
	struct cryptkop *krp;
	struct crypto_ret_worker *ret_worker;

	db_show_drivers();
	db_printf("\n");

	db_printf("%4s %5s %4s %4s %8s %4s %8s\n",
	    "Op", "Status", "#IP", "#OP", "CRID", "HID", "Callback");
	TAILQ_FOREACH(krp, &crp_kq, krp_next) {
		db_printf("%4u %5u %4u %4u %08x %4u %8p\n"
		    , krp->krp_op
		    , krp->krp_status
		    , krp->krp_iparams, krp->krp_oparams
		    , krp->krp_crid, krp->krp_hid
		    , krp->krp_callback
		);
	}

	ret_worker = CRYPTO_RETW(0);
	if (!TAILQ_EMPTY(&ret_worker->crp_ret_kq)) {
		db_printf("%4s %5s %8s %4s %8s\n",
		    "Op", "Status", "CRID", "HID", "Callback");
		TAILQ_FOREACH(krp, &ret_worker->crp_ret_kq, krp_next) {
			db_printf("%4u %5u %08x %4u %8p\n"
			    , krp->krp_op
			    , krp->krp_status
			    , krp->krp_crid, krp->krp_hid
			    , krp->krp_callback
			);
		}
	}
}
#endif

int crypto_modevent(module_t mod, int type, void *unused);

/*
 * Initialization code, both for static and dynamic loading.
 * Note this is not invoked via the usual DECLARE_MODULE
 * mechanism but instead is listed as a dependency by the
 * cryptosoft driver.  This guarantees proper ordering of
 * calls on module load/unload.
 */
int
crypto_modevent(module_t mod, int type, void *unused)
{
	int error = EINVAL;

	switch (type) {
	case MOD_LOAD:
		error = crypto_init();
		if (error == 0 && bootverbose)
			printf("crypto: <crypto core>\n");
		break;
	case MOD_UNLOAD:
		/* XXX disallow if there are active sessions */
		error = 0;
		crypto_destroy();
		return (0);
	}
	return (error);
}
MODULE_VERSION(crypto, 1);
MODULE_DEPEND(crypto, zlib, 1, 1, 1);
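
/*
 * For reference: the "dependency" hookup described above.  On the
 * cryptosoft side the module declaration names crypto_modevent as its
 * event handler and declares a MODULE_DEPEND on "crypto", which forces
 * load/unload ordering relative to this file.  The lines below sketch
 * that pattern with a hypothetical "mydrv" in place of cryptosoft;
 * they are illustrative only and are not compiled here.
 */
#if 0
DRIVER_MODULE(mydrv, nexus, mydrv_driver, mydrv_devclass,
    crypto_modevent, 0);
MODULE_VERSION(mydrv, 1);
MODULE_DEPEND(mydrv, crypto, 1, 1, 1);
#endif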