1 /* $FreeBSD$ */ 2 /* $OpenBSD: crypto.c,v 1.38 2002/06/11 11:14:29 beck Exp $ */ 3 /* 4 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu) 5 * 6 * This code was written by Angelos D. Keromytis in Athens, Greece, in 7 * February 2000. Network Security Technologies Inc. (NSTI) kindly 8 * supported the development of this code. 9 * 10 * Copyright (c) 2000, 2001 Angelos D. Keromytis 11 * 12 * Permission to use, copy, and modify this software with or without fee 13 * is hereby granted, provided that this entire notice is included in 14 * all source code copies of any software which is or includes a copy or 15 * modification of this software. 16 * 17 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR 18 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY 19 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE 20 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR 21 * PURPOSE. 22 */ 23 24 #include <sys/param.h> 25 #include <sys/systm.h> 26 #include <sys/eventhandler.h> 27 #include <sys/kernel.h> 28 #include <sys/kthread.h> 29 #include <sys/lock.h> 30 #include <sys/mutex.h> 31 #include <sys/malloc.h> 32 #include <sys/proc.h> 33 #include <sys/sysctl.h> 34 35 #include <vm/uma.h> 36 #include <opencrypto/cryptodev.h> 37 #include <opencrypto/xform.h> /* XXX for M_XDATA */ 38 39 #define SESID2HID(sid) (((sid) >> 32) & 0xffffffff) 40 41 /* 42 * Crypto drivers register themselves by allocating a slot in the 43 * crypto_drivers table with crypto_get_driverid() and then registering 44 * each algorithm they support with crypto_register() and crypto_kregister(). 
 */
static	struct mtx crypto_drivers_mtx;		/* lock on driver table */
#define	CRYPTO_DRIVER_LOCK()	mtx_lock(&crypto_drivers_mtx)
#define	CRYPTO_DRIVER_UNLOCK()	mtx_unlock(&crypto_drivers_mtx)
static	struct cryptocap *crypto_drivers = NULL; /* table of registered drivers */
static	int crypto_drivers_num = 0;		/* capacity of crypto_drivers[] */

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * A single mutex is used to lock access to both queues.  We could
 * have one per-queue but having one simplifies handling of block/unblock
 * operations.
 */
static	TAILQ_HEAD(,cryptop) crp_q;		/* request queues */
static	TAILQ_HEAD(,cryptkop) crp_kq;
static	struct mtx crypto_q_mtx;
#define	CRYPTO_Q_LOCK()		mtx_lock(&crypto_q_mtx)
#define	CRYPTO_Q_UNLOCK()	mtx_unlock(&crypto_q_mtx)

/*
 * There are two queues for processing completed crypto requests; one
 * for the symmetric and one for the asymmetric ops.  We only need one
 * but have two to avoid type futzing (cryptop vs. cryptkop).  A single
 * mutex is used to lock access to both queues.  Note that this lock
 * must be separate from the lock on request queues to insure driver
 * callbacks don't generate lock order reversals.
 */
static	TAILQ_HEAD(,cryptop) crp_ret_q;		/* callback queues */
static	TAILQ_HEAD(,cryptkop) crp_ret_kq;
static	struct mtx crypto_ret_q_mtx;
#define	CRYPTO_RETQ_LOCK()	mtx_lock(&crypto_ret_q_mtx)
#define	CRYPTO_RETQ_UNLOCK()	mtx_unlock(&crypto_ret_q_mtx)

/* Allocation zones for request (cryptop) and descriptor (cryptodesc) records. */
static	uma_zone_t cryptop_zone;
static	uma_zone_t cryptodesc_zone;

int	crypto_usercrypto = 1;		/* userland may open /dev/crypto */
SYSCTL_INT(_kern, OID_AUTO, usercrypto, CTLFLAG_RW,
	   &crypto_usercrypto, 0,
	   "Enable/disable user-mode access to crypto support");
int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
	   &crypto_userasymcrypto, 0,
	   "Enable/disable user-mode access to asymmetric crypto support");
int	crypto_devallowsoft = 0;	/* only use hardware crypto for asym */
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
	   &crypto_devallowsoft, 0,
	   "Enable/disable use of software asym crypto support");

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

/*
 * One-time framework setup: create the UMA zones, allocate the driver
 * table (CRYPTO_DRIVERS_INITIAL entries), initialize the three mutexes
 * and the request/return queues.  Called from the module load event;
 * panics on allocation failure since the framework cannot operate
 * without these resources.
 */
static void
crypto_init(void)
{
	cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
				    0, 0, 0, 0,
				    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc),
				    0, 0, 0, 0,
				    UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
	if (cryptodesc_zone == NULL || cryptop_zone == NULL)
		panic("cannot setup crypto zones");

	mtx_init(&crypto_drivers_mtx, "crypto driver table",
		NULL, MTX_DEF|MTX_QUIET);

	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
	crypto_drivers = malloc(crypto_drivers_num *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
	if (crypto_drivers == NULL)
		panic("cannot setup crypto drivers");

	TAILQ_INIT(&crp_q);
	TAILQ_INIT(&crp_kq);
	mtx_init(&crypto_q_mtx, "crypto op queues", NULL, MTX_DEF);

	TAILQ_INIT(&crp_ret_q);
	TAILQ_INIT(&crp_ret_kq);
	mtx_init(&crypto_ret_q_mtx, "crypto return queues", NULL, MTX_DEF);
}

/*
 * Initialization code, both for static and dynamic loading.
 */
static int
crypto_modevent(module_t mod, int type, void *unused)
{
	switch (type) {
	case MOD_LOAD:
		crypto_init();
		if (bootverbose)
			printf("crypto: <crypto core>\n");
		return 0;
	case MOD_UNLOAD:
		/*XXX disallow if active sessions */
		/*XXX kill kthreads */
		return 0;
	}
	return EINVAL;
}

static moduledata_t crypto_mod = {
	"crypto",
	crypto_modevent,
	0
};
MODULE_VERSION(crypto, 1);
DECLARE_MODULE(crypto, crypto_mod, SI_SUB_PSEUDO, SI_ORDER_SECOND);

/*
 * Create a new session and return its handle through *sid.  The handle
 * packs the driver index in the upper 32 bits and the driver-local
 * session id in the lower 32 bits (see SESID2HID).
 *
 * cri is the linked list of algorithm descriptions the session must
 * support.  hard selects the driver class: > 0 requires a hardware
 * driver, < 0 requires a software driver, 0 accepts either.
 * Returns 0 on success or an errno (EINVAL when no suitable driver
 * exists or the driver's init routine fails).
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptoini *cr;
	u_int32_t hid, lid;
	int err = EINVAL;

	CRYPTO_DRIVER_LOCK();

	if (crypto_drivers == NULL)
		goto done;

	/*
	 * The algorithm we use here is pretty stupid; just use the
	 * first driver that supports all the algorithms we need.
	 *
	 * XXX We need more smarts here (in real life too, but that's
	 * XXX another story altogether).
	 */

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (crypto_drivers[hid].cc_newsession == NULL ||
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP))
			continue;

		/* Hardware required -- ignore software drivers. */
		if (hard > 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE))
			continue;
		/* Software required -- ignore hardware drivers. */
		if (hard < 0 &&
		    (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
			continue;

		/* See if all the algorithms are supported. */
		for (cr = cri; cr; cr = cr->cri_next)
			if (crypto_drivers[hid].cc_alg[cr->cri_alg] == 0)
				break;

		if (cr == NULL) {
			/* Ok, all algorithms are supported. */

			/*
			 * Can't do everything in one session.
			 *
			 * XXX Fix this. We need to inject a "virtual"
			 * XXX session layer right about here.
			 */

			/* Call the driver initialization routine. */
			lid = hid;		/* Pass the driver ID. */
			err = crypto_drivers[hid].cc_newsession(
					crypto_drivers[hid].cc_arg, &lid, cri);
			if (err == 0) {
				/* Pack <driver id, local session id>. */
				(*sid) = hid;
				(*sid) <<= 32;
				(*sid) |= (lid & 0xffffffff);
				crypto_drivers[hid].cc_sessions++;
			}
			break;
		}
	}
done:
	CRYPTO_DRIVER_UNLOCK();
	return err;
}

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).  Decrements the driver's session count, invokes the driver's
 * cleanup hook if present, and recycles the driver slot when this was
 * the last session of a driver pending unregistration (CRYPTOCAP_F_CLEANUP).
 */
int
crypto_freesession(u_int64_t sid)
{
	u_int32_t hid;
	int err;

	CRYPTO_DRIVER_LOCK();

	if (crypto_drivers == NULL) {
		err = EINVAL;
		goto done;
	}

	/* Determine two IDs. */
	hid = SESID2HID(sid);

	if (hid >= crypto_drivers_num) {
		err = ENOENT;
		goto done;
	}

	if (crypto_drivers[hid].cc_sessions)
		crypto_drivers[hid].cc_sessions--;

	/* Call the driver cleanup routine, if available. */
	if (crypto_drivers[hid].cc_freesession)
		err = crypto_drivers[hid].cc_freesession(
				crypto_drivers[hid].cc_arg, sid);
	else
		err = 0;

	/*
	 * If this was the last session of a driver marked as invalid,
	 * make the entry available for reuse.
	 */
	if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
	    crypto_drivers[hid].cc_sessions == 0)
		bzero(&crypto_drivers[hid], sizeof(struct cryptocap));

done:
	CRYPTO_DRIVER_UNLOCK();
	return err;
}

/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	int i;

	CRYPTO_DRIVER_LOCK();

	/* Find a free slot: no process hook, not in cleanup, no sessions. */
	for (i = 0; i < crypto_drivers_num; i++)
		if (crypto_drivers[i].cc_process == NULL &&
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
		    crypto_drivers[i].cc_sessions == 0)
			break;

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (newdrv == NULL) {
			CRYPTO_DRIVER_UNLOCK();
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		bcopy(crypto_drivers, newdrv,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	/* NB: state is zero'd on free */
	crypto_drivers[i].cc_sessions = 1;	/* Mark slot reserved until registration. */
	crypto_drivers[i].cc_flags = flags;
	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	CRYPTO_DRIVER_UNLOCK();

	return i;
}

/*
 * Map a driver index to its capability entry; NULL if the table has
 * not been allocated yet or hid is out of range.  All callers in this
 * file hold either the driver-table or the op-queue mutex while using
 * the returned pointer.
 */
static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{
	if (crypto_drivers == NULL)
		return NULL;
	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}

/*
 * Register support for a key-related (asymmetric) algorithm.  This
 * routine is called once for each such algorithm supported by a driver.
 * The first call also installs the driver's key-op callback and its
 * opaque argument.  Returns 0, or EINVAL for a bad driver id or an
 * algorithm outside [CRK_ALGORITM_MIN, CRK_ALGORITHM_MAX].
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void*, struct cryptkop *, int),
    void *karg)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			printf("crypto: driver %u registers key alg %u flags %u\n"
				, driverid
				, kalg
				, flags
			);

		/* Only the first registration installs the callback. */
		if (cap->cc_kprocess == NULL) {
			cap->cc_karg = karg;
			cap->cc_kprocess = kprocess;
		}
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}

/*
 * Register support for a non-key-related (symmetric) algorithm.  This
 * routine is called once for each such algorithm supported by a driver.
 * The first call also installs the driver's callbacks and clears the
 * session count that crypto_get_driverid used to reserve the slot.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void*, u_int32_t*, struct cryptoini*),
    int (*freeses)(void*, u_int64_t),
    int (*process)(void*, struct cryptop *, int),
    void *arg)
{
	struct cryptocap *cap;
	int err;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	/* NB: algorithms are in the range [1..max] */
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose)
			printf("crypto: driver %u registers alg %u flags %u maxoplen %u\n"
				, driverid
				, alg
				, flags
				, maxoplen
			);

		if (cap->cc_process == NULL) {
			cap->cc_arg = arg;
			cap->cc_newsession = newses;
			cap->cc_process = process;
			cap->cc_freesession = freeses;
			cap->cc_sessions = 0;	/* Unmark reserved slot. */
		}
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}

/*
 * Unregister a crypto driver.  If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	int i, err;
	u_int32_t ses;
	struct cryptocap *cap;

	CRYPTO_DRIVER_LOCK();

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
	    cap->cc_alg[alg] != 0) {
		cap->cc_alg[alg] = 0;
		cap->cc_max_op_len[alg] = 0;

		/* Was this the last algorithm ? */
		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0)
				break;

		if (i == CRYPTO_ALGORITHM_MAX + 1) {
			ses = cap->cc_sessions;
			bzero(cap, sizeof(struct cryptocap));
			if (ses != 0) {
				/*
				 * If there are pending sessions, just mark as invalid.
				 */
				cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
				cap->cc_sessions = ses;
			}
		}
		err = 0;
	} else
		err = EINVAL;

	CRYPTO_DRIVER_UNLOCK();
	return err;
}

/*
 * Unregister all algorithms associated with a crypto driver.
483 * If there are pending sessions using it, leave enough information 484 * around so that subsequent calls using those sessions will 485 * correctly detect the driver has been unregistered and reroute 486 * requests. 487 */ 488 int 489 crypto_unregister_all(u_int32_t driverid) 490 { 491 int i, err; 492 u_int32_t ses; 493 struct cryptocap *cap; 494 495 CRYPTO_DRIVER_LOCK(); 496 497 cap = crypto_checkdriver(driverid); 498 if (cap != NULL) { 499 for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) { 500 cap->cc_alg[i] = 0; 501 cap->cc_max_op_len[i] = 0; 502 } 503 ses = cap->cc_sessions; 504 bzero(cap, sizeof(struct cryptocap)); 505 if (ses != 0) { 506 /* 507 * If there are pending sessions, just mark as invalid. 508 */ 509 cap->cc_flags |= CRYPTOCAP_F_CLEANUP; 510 cap->cc_sessions = ses; 511 } 512 err = 0; 513 } else 514 err = EINVAL; 515 516 CRYPTO_DRIVER_UNLOCK(); 517 return err; 518 } 519 520 /* 521 * Clear blockage on a driver. The what parameter indicates whether 522 * the driver is now ready for cryptop's and/or cryptokop's. 523 */ 524 int 525 crypto_unblock(u_int32_t driverid, int what) 526 { 527 struct cryptocap *cap; 528 int needwakeup, err; 529 530 CRYPTO_Q_LOCK(); 531 cap = crypto_checkdriver(driverid); 532 if (cap != NULL) { 533 needwakeup = 0; 534 if (what & CRYPTO_SYMQ) { 535 needwakeup |= cap->cc_qblocked; 536 cap->cc_qblocked = 0; 537 } 538 if (what & CRYPTO_ASYMQ) { 539 needwakeup |= cap->cc_kqblocked; 540 cap->cc_kqblocked = 0; 541 } 542 if (needwakeup) 543 wakeup_one(&crp_q); 544 err = 0; 545 } else 546 err = EINVAL; 547 CRYPTO_Q_UNLOCK(); 548 549 return err; 550 } 551 552 /* 553 * Add a crypto request to a queue, to be processed by the kernel thread. 554 */ 555 int 556 crypto_dispatch(struct cryptop *crp) 557 { 558 struct cryptocap *cap; 559 int wasempty; 560 561 CRYPTO_Q_LOCK(); 562 wasempty = TAILQ_EMPTY(&crp_q); 563 TAILQ_INSERT_TAIL(&crp_q, crp, crp_next); 564 565 /* 566 * Wakeup processing thread if driver is not blocked. 
567 */ 568 cap = crypto_checkdriver(SESID2HID(crp->crp_sid)); 569 if (cap && !cap->cc_qblocked && wasempty) 570 wakeup_one(&crp_q); 571 CRYPTO_Q_UNLOCK(); 572 573 return 0; 574 } 575 576 /* 577 * Add an asymetric crypto request to a queue, 578 * to be processed by the kernel thread. 579 */ 580 int 581 crypto_kdispatch(struct cryptkop *krp) 582 { 583 struct cryptocap *cap; 584 int wasempty; 585 586 CRYPTO_Q_LOCK(); 587 wasempty = TAILQ_EMPTY(&crp_kq); 588 TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next); 589 590 /* 591 * Wakeup processing thread if driver is not blocked. 592 */ 593 cap = crypto_checkdriver(krp->krp_hid); 594 if (cap && !cap->cc_kqblocked && wasempty) 595 wakeup_one(&crp_q); /* NB: shared wait channel */ 596 CRYPTO_Q_UNLOCK(); 597 598 return 0; 599 } 600 601 /* 602 * Dispatch an assymetric crypto request to the appropriate crypto devices. 603 */ 604 static int 605 crypto_kinvoke(struct cryptkop *krp, int hint) 606 { 607 u_int32_t hid; 608 int error; 609 610 mtx_assert(&crypto_q_mtx, MA_OWNED); 611 612 /* Sanity checks. */ 613 if (krp == NULL) 614 return EINVAL; 615 if (krp->krp_callback == NULL) { 616 free(krp, M_XDATA); /* XXX allocated in cryptodev */ 617 return EINVAL; 618 } 619 620 for (hid = 0; hid < crypto_drivers_num; hid++) { 621 if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) && 622 !crypto_devallowsoft) 623 continue; 624 if (crypto_drivers[hid].cc_kprocess == NULL) 625 continue; 626 if ((crypto_drivers[hid].cc_kalg[krp->krp_op] & 627 CRYPTO_ALG_FLAG_SUPPORTED) == 0) 628 continue; 629 break; 630 } 631 if (hid < crypto_drivers_num) { 632 krp->krp_hid = hid; 633 error = crypto_drivers[hid].cc_kprocess( 634 crypto_drivers[hid].cc_karg, krp, hint); 635 } else 636 error = ENODEV; 637 638 if (error) { 639 krp->krp_status = error; 640 crypto_kdone(krp); 641 } 642 return 0; 643 } 644 645 /* 646 * Dispatch a crypto request to the appropriate crypto devices. 
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
	u_int32_t hid;
	int (*process)(void*, struct cryptop *, int);

	mtx_assert(&crypto_q_mtx, MA_OWNED);

	/* Sanity checks. */
	if (crp == NULL)
		return EINVAL;
	if (crp->crp_callback == NULL) {
		/* No way to report completion; reclaim the request. */
		crypto_freereq(crp);
		return EINVAL;
	}
	if (crp->crp_desc == NULL) {
		/* Nothing to do; complete immediately with an error status. */
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return 0;
	}

	hid = SESID2HID(crp->crp_sid);
	if (hid < crypto_drivers_num) {
		/* Driver marked for cleanup: retire this session now. */
		if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP)
			crypto_freesession(crp->crp_sid);
		process = crypto_drivers[hid].cc_process;
	} else {
		process = NULL;
	}

	if (process == NULL) {
		struct cryptodesc *crd;
		u_int64_t nid;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.  The
		 * cryptoini records embedded in the descriptors are chained
		 * together to rebuild the session description for
		 * crypto_newsession.
		 */
		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
			crp->crp_sid = nid;

		/* EAGAIN tells the submitter to resubmit against the new sid. */
		crp->crp_etype = EAGAIN;
		crypto_done(crp);
		return 0;
	} else {
		/*
		 * Invoke the driver to process the request.  May return
		 * ERESTART, in which case the caller (crypto_proc) marks
		 * the driver blocked and requeues the op.
		 */
		return (*process)(crypto_drivers[hid].cc_arg, crp, hint);
	}
}

/*
 * Release a set of crypto descriptors: free each chained cryptodesc
 * and then the cryptop itself back to their zones.  NULL is a no-op.
 */
void
crypto_freereq(struct cryptop *crp)
{
	struct cryptodesc *crd;

	if (crp == NULL)
		return;

	while ((crd = crp->crp_desc) != NULL) {
		crp->crp_desc = crd->crd_next;
		uma_zfree(cryptodesc_zone, crd);
	}

	uma_zfree(cryptop_zone, crp);
}

/*
 * Acquire a set of crypto descriptors.
724 */ 725 struct cryptop * 726 crypto_getreq(int num) 727 { 728 struct cryptodesc *crd; 729 struct cryptop *crp; 730 731 crp = uma_zalloc(cryptop_zone, 0); 732 if (crp != NULL) { 733 while (num--) { 734 crd = uma_zalloc(cryptodesc_zone, 0); 735 if (crd == NULL) { 736 crypto_freereq(crp); 737 return NULL; 738 } 739 740 crd->crd_next = crp->crp_desc; 741 crp->crp_desc = crd; 742 } 743 } 744 return crp; 745 } 746 747 /* 748 * Invoke the callback on behalf of the driver. 749 */ 750 void 751 crypto_done(struct cryptop *crp) 752 { 753 int wasempty; 754 755 CRYPTO_RETQ_LOCK(); 756 wasempty = TAILQ_EMPTY(&crp_ret_q); 757 TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next); 758 759 if (wasempty) 760 wakeup_one(&crp_ret_q); /* shared wait channel */ 761 CRYPTO_RETQ_UNLOCK(); 762 } 763 764 /* 765 * Invoke the callback on behalf of the driver. 766 */ 767 void 768 crypto_kdone(struct cryptkop *krp) 769 { 770 int wasempty; 771 772 CRYPTO_RETQ_LOCK(); 773 wasempty = TAILQ_EMPTY(&crp_ret_kq); 774 TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next); 775 776 if (wasempty) 777 wakeup_one(&crp_ret_q); /* shared wait channel */ 778 CRYPTO_RETQ_UNLOCK(); 779 } 780 781 int 782 crypto_getfeat(int *featp) 783 { 784 int hid, kalg, feat = 0; 785 786 if (!crypto_userasymcrypto) 787 goto out; 788 789 CRYPTO_DRIVER_LOCK(); 790 for (hid = 0; hid < crypto_drivers_num; hid++) { 791 if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) && 792 !crypto_devallowsoft) { 793 continue; 794 } 795 if (crypto_drivers[hid].cc_kprocess == NULL) 796 continue; 797 for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++) 798 if ((crypto_drivers[hid].cc_kalg[kalg] & 799 CRYPTO_ALG_FLAG_SUPPORTED) != 0) 800 feat |= 1 << kalg; 801 } 802 CRYPTO_DRIVER_UNLOCK(); 803 out: 804 *featp = feat; 805 return (0); 806 } 807 808 static struct proc *cryptoproc; 809 810 static void 811 crypto_shutdown(void *arg, int howto) 812 { 813 /* XXX flush queues */ 814 } 815 816 /* 817 * Crypto thread, dispatches crypto requests. 
 */
static void
crypto_proc(void)
{
	struct cryptop *crp, *submit;
	struct cryptkop *krp;
	struct cryptocap *cap;
	int result, hint;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, crypto_shutdown, NULL,
			      SHUTDOWN_PRI_FIRST);

	/* NB: the queue mutex is held for the life of the thread,
	 * released only inside msleep below. */
	CRYPTO_Q_LOCK();

	for (;;) {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH(crp, &crp_q, crp_next) {
			u_int32_t hid = SESID2HID(crp->crp_sid);
			cap = crypto_checkdriver(hid);
			if (cap == NULL || cap->cc_process == NULL) {
				/* Op needs to be migrated, process it. */
				if (submit == NULL)
					submit = crp;
				break;
			}
			if (!cap->cc_qblocked) {
				if (submit != NULL) {
					/*
					 * We stop on finding another op,
					 * regardless whether its for the same
					 * driver or not.  We could keep
					 * searching the queue but it might be
					 * better to just use a per-driver
					 * queue instead.
					 */
					if (SESID2HID(submit->crp_sid) == hid)
						hint = CRYPTO_HINT_MORE;
					break;
				} else {
					submit = crp;
					if (submit->crp_flags & CRYPTO_F_NODELAY)
						break;
					/* keep scanning for more are q'd */
				}
			}
		}
		if (submit != NULL) {
			TAILQ_REMOVE(&crp_q, submit, crp_next);
			result = crypto_invoke(submit, hint);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would
				 * best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front.  This should be ok; putting
				 * it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[SESID2HID(submit->crp_sid)].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
			}
		}

		/* As above, but for key ops */
		TAILQ_FOREACH(krp, &crp_kq, krp_next) {
			cap = crypto_checkdriver(krp->krp_hid);
			if (cap == NULL || cap->cc_kprocess == NULL) {
				/* Op needs to be migrated, process it. */
				break;
			}
			if (!cap->cc_kqblocked)
				break;
		}
		if (krp != NULL) {
			TAILQ_REMOVE(&crp_kq, krp, krp_next);
			result = crypto_kinvoke(krp, 0);
			/*
			 * NOTE(review): crypto_kinvoke currently always
			 * returns 0 (driver errors are reported via
			 * krp_status/crypto_kdone), so this ERESTART path
			 * appears unreachable -- confirm before relying on
			 * cc_kqblocked being set here.
			 */
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptkop's and put
				 * the request back in the queue.  It would
				 * best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front.  This should be ok; putting
				 * it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
				TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
			}
		}

		if (submit == NULL && krp == NULL) {
			/*
			 * Nothing more to be processed.  Sleep until we're
			 * woken because there are more ops to process.
			 * This happens either by submission or by a driver
			 * becoming unblocked and notifying us through
			 * crypto_unblock.  Note that when we wakeup we
			 * start processing each queue again from the
			 * front. It's not clear that it's important to
			 * preserve this ordering since ops may finish
			 * out of order if dispatched to different devices
			 * and some become blocked while others do not.
			 */
			msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
		}
	}
}
static struct kproc_desc crypto_kp = {
	"crypto",
	crypto_proc,
	&cryptoproc
};
SYSINIT(crypto_proc, SI_SUB_KTHREAD_IDLE, SI_ORDER_THIRD,
	kproc_start, &crypto_kp)

/* Callback kernel thread's proc pointer. */
static struct proc *cryptoretproc;

static void
crypto_ret_shutdown(void *arg, int howto)
{
	/* XXX flush queues */
}

/*
 * Crypto returns thread, does callbacks for processed crypto requests.
 * Callbacks are done here, rather than in the crypto drivers, because
 * callbacks typically are expensive and would slow interrupt handling.
 */
static void
crypto_ret_proc(void)
{
	struct cryptop *crpt;
	struct cryptkop *krpt;

	EVENTHANDLER_REGISTER(shutdown_pre_sync, crypto_ret_shutdown, NULL,
			      SHUTDOWN_PRI_FIRST);

	CRYPTO_RETQ_LOCK();

	for (;;) {
		/* Harvest return q's for completed ops */
		crpt = TAILQ_FIRST(&crp_ret_q);
		if (crpt != NULL)
			TAILQ_REMOVE(&crp_ret_q, crpt, crp_next);

		krpt = TAILQ_FIRST(&crp_ret_kq);
		if (krpt != NULL)
			TAILQ_REMOVE(&crp_ret_kq, krpt, krp_next);

		if (crpt != NULL || krpt != NULL) {
			CRYPTO_RETQ_UNLOCK();
			/*
			 * Run callbacks unlocked.
			 */
			if (crpt != NULL)
				crpt->crp_callback(crpt);
			if (krpt != NULL)
				krpt->krp_callback(krpt);
			CRYPTO_RETQ_LOCK();
		} else {
			/*
			 * Nothing more to be processed.  Sleep until we're
			 * woken because there are more returns to process.
			 */
			msleep(&crp_ret_q, &crypto_ret_q_mtx, PWAIT,
				"crypto_ret_wait", 0);
		}
	}
}
static struct kproc_desc crypto_ret_kp = {
	"crypto returns",
	crypto_ret_proc,
	&cryptoretproc
};
SYSINIT(crypto_ret_proc, SI_SUB_KTHREAD_IDLE, SI_ORDER_THIRD,
	kproc_start, &crypto_ret_kp)