/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file contains the core framework routines for the
 * kernel cryptographic framework. These routines sit in the
 * layer between the kernel API/ioctls and the SPI.
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/cpupart.h>
#include <sys/ksynch.h>
#include <sys/callb.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/kstat.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/taskq_impl.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>


kcf_global_swq_t *gswq;		/* Global software queue */

/* Thread pool related variables */
static kcf_pool_t *kcfpool;	/* Thread pool of kcfd LWPs */
int kcf_maxthreads = 2;
int kcf_minthreads = 1;
int kcf_thr_multiple = 2;	/* Boot-time tunable for experimentation */
static ulong_t kcf_idlethr_timeout;
static boolean_t kcf_sched_running = B_FALSE;
#define	KCF_DEFAULT_THRTIMEOUT	60000000	/* 60 seconds */

/* kmem caches used by the scheduler */
static struct kmem_cache *kcf_sreq_cache;
static struct kmem_cache *kcf_areq_cache;
static struct kmem_cache *kcf_context_cache;

/* Global request ID table */
static kcf_reqid_table_t *kcf_reqid_table[REQID_TABLES];

/* KCF stats. Not protected. */
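/*
 * Editorial note (assumption, not in the original source): the order of
 * the named entries below must match the field order of kcf_stats_t,
 * since kcf_misc_kstat_update() assigns the corresponding fields
 * (ks_thrs_in_pool, ks_idle_thrs, ks_minthrs, ks_maxthrs, ks_swq_njobs,
 * ks_swq_maxjobs, ks_taskq_threads, ks_taskq_minalloc,
 * ks_taskq_maxalloc) in the same order. Keep the two in sync.
 */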
static kcf_stats_t kcf_ksdata = {
	{ "total threads in pool",	KSTAT_DATA_UINT32},
	{ "idle threads in pool",	KSTAT_DATA_UINT32},
	{ "min threads in pool",	KSTAT_DATA_UINT32},
	{ "max threads in pool",	KSTAT_DATA_UINT32},
	{ "requests in gswq",		KSTAT_DATA_UINT32},
	{ "max requests in gswq",	KSTAT_DATA_UINT32},
	{ "threads for HW taskq",	KSTAT_DATA_UINT32},
	{ "minalloc for HW taskq",	KSTAT_DATA_UINT32},
	{ "maxalloc for HW taskq",	KSTAT_DATA_UINT32}
};

static kstat_t *kcf_misc_kstat = NULL;
ulong_t kcf_swprov_hndl = 0;

static kcf_areq_node_t *kcf_areqnode_alloc(kcf_provider_desc_t *,
    kcf_context_t *, crypto_call_req_t *, kcf_req_params_t *, boolean_t);
static int kcf_disp_sw_request(kcf_areq_node_t *);
static void process_req_hwp(void *);
static kcf_areq_node_t *kcf_dequeue(void);
static int kcf_enqueue(kcf_areq_node_t *);
static void kcf_failover_thread(void);
static void kcfpool_alloc(void);
static void kcf_reqid_delete(kcf_areq_node_t *areq);
static crypto_req_id_t kcf_reqid_insert(kcf_areq_node_t *areq);
static int kcf_misc_kstat_update(kstat_t *ksp, int rw);
static void compute_min_max_threads(void);


/*
 * Create a new context.
 */
crypto_ctx_t *
kcf_new_ctx(crypto_call_req_t *crq, kcf_provider_desc_t *pd,
    crypto_session_id_t sid)
{
	crypto_ctx_t *ctx;
	kcf_context_t *kcf_ctx;

	kcf_ctx = kmem_cache_alloc(kcf_context_cache,
	    (crq == NULL) ? KM_SLEEP : KM_NOSLEEP);
	if (kcf_ctx == NULL)
		return (NULL);

	/* initialize the context for the consumer */
	kcf_ctx->kc_refcnt = 1;
	kcf_ctx->kc_req_chain_first = NULL;
	kcf_ctx->kc_req_chain_last = NULL;
	kcf_ctx->kc_secondctx = NULL;
	KCF_PROV_REFHOLD(pd);
	kcf_ctx->kc_prov_desc = pd;
	kcf_ctx->kc_sw_prov_desc = NULL;
	kcf_ctx->kc_mech = NULL;

	ctx = &kcf_ctx->kc_glbl_ctx;
	ctx->cc_provider = pd->pd_prov_handle;
	ctx->cc_session = sid;
	ctx->cc_provider_private = NULL;
	ctx->cc_framework_private = (void *)kcf_ctx;
	ctx->cc_flags = 0;
	ctx->cc_opstate = NULL;

	return (ctx);
}

/*
 * Allocate a new async request node.
 *
 * ictx - Framework private context pointer
 * crq - Has the callback function and argument. Should be non-NULL.
 * req - The parameters to pass to the SPI
 */
static kcf_areq_node_t *
kcf_areqnode_alloc(kcf_provider_desc_t *pd, kcf_context_t *ictx,
    crypto_call_req_t *crq, kcf_req_params_t *req, boolean_t isdual)
{
	kcf_areq_node_t *arptr, *areq;

	ASSERT(crq != NULL);
	arptr = kmem_cache_alloc(kcf_areq_cache, KM_NOSLEEP);
	if (arptr == NULL)
		return (NULL);

	arptr->an_state = REQ_ALLOCATED;
	arptr->an_reqarg = *crq;
	arptr->an_params = *req;
	arptr->an_context = ictx;
	arptr->an_isdual = isdual;

	arptr->an_next = arptr->an_prev = NULL;
	KCF_PROV_REFHOLD(pd);
	arptr->an_provider = pd;
	arptr->an_tried_plist = NULL;
	arptr->an_refcnt = 1;
	arptr->an_idnext = arptr->an_idprev = NULL;

	/*
	 * Requests for context-less operations do not use the
	 * fields an_is_my_turn and an_ctxchain_next.
	 */
	if (ictx == NULL)
		return (arptr);

	KCF_CONTEXT_REFHOLD(ictx);
	/*
	 * Chain this request to the context.
	 */
	mutex_enter(&ictx->kc_in_use_lock);
	arptr->an_ctxchain_next = NULL;
	if ((areq = ictx->kc_req_chain_last) == NULL) {
		arptr->an_is_my_turn = B_TRUE;
		ictx->kc_req_chain_last =
		    ictx->kc_req_chain_first = arptr;
	} else {
		ASSERT(ictx->kc_req_chain_first != NULL);
		arptr->an_is_my_turn = B_FALSE;
		/* Insert the new request at the end of the chain. */
		areq->an_ctxchain_next = arptr;
		ictx->kc_req_chain_last = arptr;
	}
	mutex_exit(&ictx->kc_in_use_lock);

	return (arptr);
}

/*
 * Queue the request node and do one of the following:
 *	- If there is an idle thread, signal it to run.
 *	- If there is no idle thread and the maximum number of running
 *	  threads has not been reached, signal the creator thread for
 *	  more threads.
 *
 * If neither condition is met, we don't need to do anything. The
 * request will be picked up by one of the worker threads when it
 * becomes available.
 */
static int
kcf_disp_sw_request(kcf_areq_node_t *areq)
{
	int err;
	int cnt = 0;

	if ((err = kcf_enqueue(areq)) != 0)
		return (err);

	if (kcfpool->kp_idlethreads > 0) {
		/* Signal an idle thread to run */
		mutex_enter(&gswq->gs_lock);
		cv_signal(&gswq->gs_cv);
		mutex_exit(&gswq->gs_lock);

		return (CRYPTO_QUEUED);
	}

	/*
	 * We keep the number of running threads at kcf_minthreads
	 * to reduce gs_lock contention.
	 */
	cnt = kcf_minthreads -
	    (kcfpool->kp_threads - kcfpool->kp_blockedthreads);
	if (cnt > 0) {
		/*
		 * The following ensures the number of threads in pool
		 * does not exceed kcf_maxthreads.
		 */
		cnt = min(cnt, kcf_maxthreads - kcfpool->kp_threads);
		if (cnt > 0) {
			/* Signal the creator thread for more threads */
			mutex_enter(&kcfpool->kp_user_lock);
			if (!kcfpool->kp_signal_create_thread) {
				kcfpool->kp_signal_create_thread = B_TRUE;
				kcfpool->kp_nthrs = cnt;
				cv_signal(&kcfpool->kp_user_cv);
			}
			mutex_exit(&kcfpool->kp_user_lock);
		}
	}

	return (CRYPTO_QUEUED);
}

/*
 * This routine is called by the taskq associated with
 * each hardware provider. We notify the kernel consumer
 * via the callback routine in case of CRYPTO_SUCCESS or
 * a failure.
 *
 * A request can be of type kcf_areq_node_t or of type
 * kcf_sreq_node_t.
 */
static void
process_req_hwp(void *ireq)
{
	int error = 0;
	crypto_ctx_t *ctx;
	kcf_call_type_t ctype;
	kcf_provider_desc_t *pd;
	kcf_areq_node_t *areq = (kcf_areq_node_t *)ireq;
	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)ireq;

	pd = ((ctype = GET_REQ_TYPE(ireq)) == CRYPTO_SYNCH) ?
	    sreq->sn_provider : areq->an_provider;

	/*
	 * Wait if flow control is in effect for the provider. A
	 * CRYPTO_PROVIDER_READY or CRYPTO_PROVIDER_FAILED
	 * notification will signal us. We also get signaled if
	 * the provider is unregistering.
	 */
	if (pd->pd_state == KCF_PROV_BUSY) {
		mutex_enter(&pd->pd_lock);
		while (pd->pd_state == KCF_PROV_BUSY)
			cv_wait(&pd->pd_resume_cv, &pd->pd_lock);
		mutex_exit(&pd->pd_lock);
	}

	/*
	 * Bump the internal reference count while the request is being
	 * processed. This is how we know when it's safe to unregister
	 * a provider. This step must precede the pd_state check below.
	 */
	KCF_PROV_IREFHOLD(pd);

	/*
	 * Fail the request if the provider has failed. We return a
	 * recoverable error and the notified clients attempt any
	 * recovery. For async clients this is done in kcf_aop_done()
	 * and for sync clients it is done in the k-api routines.
	 */
	if (pd->pd_state >= KCF_PROV_FAILED) {
		error = CRYPTO_DEVICE_ERROR;
		goto bail;
	}

	if (ctype == CRYPTO_SYNCH) {
		mutex_enter(&sreq->sn_lock);
		sreq->sn_state = REQ_INPROGRESS;
		mutex_exit(&sreq->sn_lock);

		ctx = sreq->sn_context ? &sreq->sn_context->kc_glbl_ctx : NULL;
		error = common_submit_request(sreq->sn_provider, ctx,
		    sreq->sn_params, sreq);
	} else {
		kcf_context_t *ictx;
		ASSERT(ctype == CRYPTO_ASYNCH);

		/*
		 * We are in the per-hardware provider thread context and
		 * hence can sleep. Note that the caller would have done
		 * a taskq_dispatch(..., TQ_NOSLEEP) and would have returned.
		 */
		ctx = (ictx = areq->an_context) ? &ictx->kc_glbl_ctx : NULL;

		mutex_enter(&areq->an_lock);
		/*
		 * We need to maintain ordering for multi-part requests.
		 * an_is_my_turn is set to B_TRUE initially for a request
		 * when it is enqueued and there are no other requests
		 * for that context. It is set later from kcf_aop_done() when
		 * the request before us in the chain of requests for the
		 * context completes. We get signaled at that point.
		 */
		if (ictx != NULL) {
			ASSERT(ictx->kc_prov_desc == areq->an_provider);

			while (areq->an_is_my_turn == B_FALSE) {
				cv_wait(&areq->an_turn_cv, &areq->an_lock);
			}
		}
		areq->an_state = REQ_INPROGRESS;
		mutex_exit(&areq->an_lock);

		error = common_submit_request(areq->an_provider, ctx,
		    &areq->an_params, areq);
	}

bail:
	if (error == CRYPTO_QUEUED) {
		/*
		 * The request is queued by the provider and we should
		 * get a crypto_op_notification() from the provider later.
		 * We notify the consumer at that time.
		 */
		return;
	} else {	/* CRYPTO_SUCCESS or other failure */
		KCF_PROV_IREFRELE(pd);
		if (ctype == CRYPTO_SYNCH)
			kcf_sop_done(sreq, error);
		else
			kcf_aop_done(areq, error);
	}
}

/*
 * This routine checks if a request can be retried on another
 * provider. If true, mech1 is initialized to point to the mechanism
 * structure. mech2 is also initialized in case of a dual operation. fg
 * is initialized to the correct crypto_func_group_t bit flag. They are
 * initialized by this routine, so that the caller can pass them to
 * kcf_get_mech_provider() or kcf_get_dual_provider() with no further
 * change.
 *
 * We check that the request is for an init or atomic routine and that
 * it is for one of the operation groups used from the k-api.
 */
static boolean_t
can_resubmit(kcf_areq_node_t *areq, crypto_mechanism_t **mech1,
    crypto_mechanism_t **mech2, crypto_func_group_t *fg)
{
	kcf_req_params_t *params;
	kcf_op_type_t optype;

	params = &areq->an_params;
	optype = params->rp_optype;

	if (!(IS_INIT_OP(optype) || IS_ATOMIC_OP(optype)))
		return (B_FALSE);

	switch (params->rp_opgrp) {
	case KCF_OG_DIGEST: {
		kcf_digest_ops_params_t *dops = &params->rp_u.digest_params;

		dops->do_mech.cm_type = dops->do_framework_mechtype;
		*mech1 = &dops->do_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DIGEST :
		    CRYPTO_FG_DIGEST_ATOMIC;
		break;
	}

	case KCF_OG_MAC: {
		kcf_mac_ops_params_t *mops = &params->rp_u.mac_params;

		mops->mo_mech.cm_type = mops->mo_framework_mechtype;
		*mech1 = &mops->mo_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC :
		    CRYPTO_FG_MAC_ATOMIC;
		break;
	}

	case KCF_OG_SIGN: {
		kcf_sign_ops_params_t *sops = &params->rp_u.sign_params;

		sops->so_mech.cm_type = sops->so_framework_mechtype;
		*mech1 = &sops->so_mech;
		switch (optype) {
		case KCF_OP_INIT:
			*fg = CRYPTO_FG_SIGN;
			break;
		case KCF_OP_ATOMIC:
			*fg = CRYPTO_FG_SIGN_ATOMIC;
			break;
		default:
			ASSERT(optype == KCF_OP_SIGN_RECOVER_ATOMIC);
			*fg = CRYPTO_FG_SIGN_RECOVER_ATOMIC;
		}
		break;
	}

	case KCF_OG_VERIFY: {
		kcf_verify_ops_params_t *vops = &params->rp_u.verify_params;

		vops->vo_mech.cm_type = vops->vo_framework_mechtype;
		*mech1 = &vops->vo_mech;
		switch (optype) {
		case KCF_OP_INIT:
			*fg = CRYPTO_FG_VERIFY;
			break;
		case KCF_OP_ATOMIC:
			*fg = CRYPTO_FG_VERIFY_ATOMIC;
			break;
		default:
			ASSERT(optype == KCF_OP_VERIFY_RECOVER_ATOMIC);
			*fg = CRYPTO_FG_VERIFY_RECOVER_ATOMIC;
		}
		break;
	}

	case KCF_OG_ENCRYPT: {
		kcf_encrypt_ops_params_t *eops = &params->rp_u.encrypt_params;

		eops->eo_mech.cm_type = eops->eo_framework_mechtype;
		*mech1 = &eops->eo_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT :
		    CRYPTO_FG_ENCRYPT_ATOMIC;
		break;
	}

	case KCF_OG_DECRYPT: {
		kcf_decrypt_ops_params_t *dcrops =
		    &params->rp_u.decrypt_params;

		dcrops->dop_mech.cm_type = dcrops->dop_framework_mechtype;
		*mech1 = &dcrops->dop_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DECRYPT :
		    CRYPTO_FG_DECRYPT_ATOMIC;
		break;
	}

	case KCF_OG_ENCRYPT_MAC: {
		kcf_encrypt_mac_ops_params_t *eops =
		    &params->rp_u.encrypt_mac_params;

		eops->em_encr_mech.cm_type = eops->em_framework_encr_mechtype;
		*mech1 = &eops->em_encr_mech;
		eops->em_mac_mech.cm_type = eops->em_framework_mac_mechtype;
		*mech2 = &eops->em_mac_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT_MAC :
		    CRYPTO_FG_ENCRYPT_MAC_ATOMIC;
		break;
	}

	case KCF_OG_MAC_DECRYPT: {
		kcf_mac_decrypt_ops_params_t *dops =
		    &params->rp_u.mac_decrypt_params;

		dops->md_mac_mech.cm_type = dops->md_framework_mac_mechtype;
		*mech1 = &dops->md_mac_mech;
		dops->md_decr_mech.cm_type = dops->md_framework_decr_mechtype;
		*mech2 = &dops->md_decr_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC_DECRYPT :
		    CRYPTO_FG_MAC_DECRYPT_ATOMIC;
		break;
	}

	default:
		return (B_FALSE);
	}

	return (B_TRUE);
}
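
/*
 * Editorial note on the cm_type resets in can_resubmit() above (an
 * inference added by the editor, not in the original source): each
 * *_ops_params_t keeps the framework-wide mechanism type in a separate
 * *_framework_mechtype field because cm_type may have been translated
 * to the failed provider's private mechanism number when the request
 * was first dispatched. Restoring cm_type from the framework mechtype
 * lets kcf_get_mech_provider()/kcf_get_dual_provider() perform a fresh
 * lookup for the retry.
 */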

/*
 * This routine is called when a request to a provider has failed
 * with a recoverable error. It tries to find another provider
 * and dispatches the request to the new provider, if one is available.
 * We reuse the request structure.
 *
 * A return value of NULL from kcf_get_mech_provider() indicates
 * we have tried the last provider.
 */
static int
kcf_resubmit_request(kcf_areq_node_t *areq)
{
	int error = CRYPTO_FAILED;
	kcf_context_t *ictx;
	kcf_provider_desc_t *old_pd;
	kcf_provider_desc_t *new_pd;
	crypto_mechanism_t *mech1 = NULL, *mech2 = NULL;
	crypto_mech_type_t prov_mt1, prov_mt2;
	crypto_func_group_t fg;

	if (!can_resubmit(areq, &mech1, &mech2, &fg))
		return (error);

	old_pd = areq->an_provider;
	/*
	 * Add old_pd to the list of providers already tried. We release
	 * the hold on old_pd (from the earlier kcf_get_mech_provider()) in
	 * kcf_free_triedlist().
	 */
	if (kcf_insert_triedlist(&areq->an_tried_plist, old_pd,
	    KM_NOSLEEP) == NULL)
		return (error);

	if (mech1 && !mech2) {
		new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, &error,
		    areq->an_tried_plist, fg,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
	} else {
		ASSERT(mech1 != NULL && mech2 != NULL);

		new_pd = kcf_get_dual_provider(mech1, mech2, NULL, &prov_mt1,
		    &prov_mt2, &error, areq->an_tried_plist, fg, fg,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
	}

	if (new_pd == NULL)
		return (error);

	/*
	 * We reuse the old context by resetting provider specific
	 * fields in it.
	 */
	if ((ictx = areq->an_context) != NULL) {
		crypto_ctx_t *ctx;

		ASSERT(old_pd == ictx->kc_prov_desc);
		KCF_PROV_REFRELE(ictx->kc_prov_desc);
		KCF_PROV_REFHOLD(new_pd);
		ictx->kc_prov_desc = new_pd;

		ctx = &ictx->kc_glbl_ctx;
		ctx->cc_provider = new_pd->pd_prov_handle;
		ctx->cc_session = new_pd->pd_sid;
		ctx->cc_provider_private = NULL;
	}

	/* We reuse areq by resetting the provider and context fields. */
	KCF_PROV_REFRELE(old_pd);
	KCF_PROV_REFHOLD(new_pd);
	areq->an_provider = new_pd;
	mutex_enter(&areq->an_lock);
	areq->an_state = REQ_WAITING;
	mutex_exit(&areq->an_lock);

	switch (new_pd->pd_prov_type) {
	case CRYPTO_SW_PROVIDER:
		error = kcf_disp_sw_request(areq);
		break;

	case CRYPTO_HW_PROVIDER: {
		taskq_t *taskq = new_pd->pd_sched_info.ks_taskq;

		if (taskq_dispatch(taskq, process_req_hwp, areq, TQ_NOSLEEP) ==
		    (taskqid_t)0) {
			error = CRYPTO_HOST_MEMORY;
		} else {
			error = CRYPTO_QUEUED;
		}

		break;
	}
	}

	return (error);
}

#define	EMPTY_TASKQ(tq)	((tq)->tq_task.tqent_next == &(tq)->tq_task)

/*
 * Routine called by both ioctl and k-api. The consumer should
 * bundle the parameters into a kcf_req_params_t structure. A bunch
 * of macros are available in ops_impl.h for this bundling. They are:
 *
 *	KCF_WRAP_DIGEST_OPS_PARAMS()
 *	KCF_WRAP_MAC_OPS_PARAMS()
 *	KCF_WRAP_ENCRYPT_OPS_PARAMS()
 *	KCF_WRAP_DECRYPT_OPS_PARAMS() ... etc.
 *
 * It is the caller's responsibility to free the ctx argument when
 * appropriate. See the KCF_CONTEXT_COND_RELEASE macro for details.
 */
int
kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
    crypto_call_req_t *crq, kcf_req_params_t *params, boolean_t cont)
{
	int error = CRYPTO_SUCCESS;
	kcf_areq_node_t *areq;
	kcf_sreq_node_t *sreq;
	kcf_context_t *kcf_ctx;
	taskq_t *taskq = pd->pd_sched_info.ks_taskq;

	kcf_ctx = ctx ? (kcf_context_t *)ctx->cc_framework_private : NULL;

	/* Synchronous cases */
	if (crq == NULL) {
		switch (pd->pd_prov_type) {
		case CRYPTO_SW_PROVIDER:
			error = common_submit_request(pd, ctx, params,
			    KCF_RHNDL(KM_SLEEP));
			break;

		case CRYPTO_HW_PROVIDER:
			sreq = kmem_cache_alloc(kcf_sreq_cache, KM_SLEEP);
			sreq->sn_state = REQ_ALLOCATED;
			sreq->sn_rv = CRYPTO_FAILED;
			sreq->sn_params = params;

			/*
			 * Note that we do not need to hold the context
			 * for the synchronous case as the context will
			 * never become invalid underneath us. We do not
			 * need to hold the provider here either as the
			 * caller has a hold.
			 */
			sreq->sn_context = kcf_ctx;
			ASSERT(KCF_PROV_REFHELD(pd));
			sreq->sn_provider = pd;

			ASSERT(taskq != NULL);
			/*
			 * Call the SPI directly if the taskq is empty and the
			 * provider is not busy, else dispatch to the taskq.
			 * Calling directly is fine as this is the synchronous
			 * case. This is unlike the asynchronous case where we
			 * must always dispatch to the taskq.
			 */
			if (EMPTY_TASKQ(taskq) &&
			    pd->pd_state == KCF_PROV_READY) {
				process_req_hwp(sreq);
			} else {
				/*
				 * We cannot tell from the taskq_dispatch()
				 * return value if we exceeded maxalloc.
				 * Hence the check here. Since we are allowed
				 * to wait in the synchronous case, we wait
				 * for the taskq to become empty.
				 */
				if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
					taskq_wait(taskq);
				}

				(void) taskq_dispatch(taskq, process_req_hwp,
				    sreq, TQ_SLEEP);
			}

			/*
			 * Wait for the notification to arrive,
			 * if the operation is not done yet.
			 * Bug# 4722589 will make the wait a cv_wait_sig().
			 */
			mutex_enter(&sreq->sn_lock);
			while (sreq->sn_state < REQ_DONE)
				cv_wait(&sreq->sn_cv, &sreq->sn_lock);
			mutex_exit(&sreq->sn_lock);

			error = sreq->sn_rv;
			kmem_cache_free(kcf_sreq_cache, sreq);

			break;

		default:
			error = CRYPTO_FAILED;
			break;
		}

	} else {	/* Asynchronous cases */
		switch (pd->pd_prov_type) {
		case CRYPTO_SW_PROVIDER:
			if (!(crq->cr_flag & CRYPTO_ALWAYS_QUEUE)) {
				/*
				 * This case has less overhead since there is
				 * no switching of context.
				 */
				error = common_submit_request(pd, ctx, params,
				    KCF_RHNDL(KM_NOSLEEP));
			} else {
				/*
				 * CRYPTO_ALWAYS_QUEUE is set. We need to
				 * queue the request and return.
				 */
				areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
				    params, cont);
				if (areq == NULL)
					error = CRYPTO_HOST_MEMORY;
				else {
					if (!(crq->cr_flag
					    & CRYPTO_SKIP_REQID)) {
						/*
						 * Set the request handle.
						 * This handle is used for any
						 * crypto_cancel_req(9f) calls
						 * from the consumer. We have
						 * to do this before
						 * dispatching the request.
						 */
						crq->cr_reqid =
						    kcf_reqid_insert(areq);
					}

					error = kcf_disp_sw_request(areq);
					/*
					 * If there was an error processing
					 * this request, remove the handle
					 * and release the request structure.
					 */
					if (error != CRYPTO_QUEUED) {
						if (!(crq->cr_flag
						    & CRYPTO_SKIP_REQID))
							kcf_reqid_delete(areq);
						KCF_AREQ_REFRELE(areq);
					}
				}
			}
			break;

		case CRYPTO_HW_PROVIDER:
			/*
			 * We need to queue the request and return.
			 */
			areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params,
			    cont);
			if (areq == NULL) {
				error = CRYPTO_HOST_MEMORY;
				goto done;
			}

			ASSERT(taskq != NULL);
			/*
			 * We cannot tell from the taskq_dispatch() return
			 * value if we exceeded maxalloc. Hence the check
			 * here.
			 */
			if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
				error = CRYPTO_BUSY;
				KCF_AREQ_REFRELE(areq);
				goto done;
			}

			if (!(crq->cr_flag & CRYPTO_SKIP_REQID)) {
				/*
				 * Set the request handle. This handle is used
				 * for any crypto_cancel_req(9f) calls from
				 * the consumer. We have to do this before
				 * dispatching the request.
				 */
				crq->cr_reqid = kcf_reqid_insert(areq);
			}

			if (taskq_dispatch(taskq,
			    process_req_hwp, areq, TQ_NOSLEEP) ==
			    (taskqid_t)0) {
				error = CRYPTO_HOST_MEMORY;
				if (!(crq->cr_flag & CRYPTO_SKIP_REQID))
					kcf_reqid_delete(areq);
				KCF_AREQ_REFRELE(areq);
			} else {
				error = CRYPTO_QUEUED;
			}
			break;

		default:
			error = CRYPTO_FAILED;
			break;
		}
	}

done:
	return (error);
}

/*
 * We're done with this framework context, so free it. Note that freeing
 * the framework context (kcf_context) frees the global context
 * (crypto_ctx).
 *
 * The provider is responsible for freeing provider-private context after a
 * final or single operation and resetting the cc_provider_private field
 * to NULL. It should do this before it notifies the framework of the
 * completion. We still need to call KCF_PROV_FREE_CONTEXT to handle cases
 * like crypto_cancel_ctx(9f).
 */
void
kcf_free_context(kcf_context_t *kcf_ctx)
{
	kcf_provider_desc_t *pd = kcf_ctx->kc_prov_desc;
	crypto_ctx_t *gctx = &kcf_ctx->kc_glbl_ctx;
	kcf_context_t *kcf_secondctx = kcf_ctx->kc_secondctx;

	/* Release the second context, if any */
	if (kcf_secondctx != NULL)
		KCF_CONTEXT_REFRELE(kcf_secondctx);

	if (gctx->cc_provider_private != NULL) {
		mutex_enter(&pd->pd_lock);
		if (!KCF_IS_PROV_REMOVED(pd)) {
			/*
			 * Increment the provider's internal refcnt so it
			 * doesn't unregister from the framework while
			 * we're calling the entry point.
			 */
			KCF_PROV_IREFHOLD(pd);
			mutex_exit(&pd->pd_lock);
			(void) KCF_PROV_FREE_CONTEXT(pd, gctx);
			KCF_PROV_IREFRELE(pd);
		} else {
			mutex_exit(&pd->pd_lock);
		}
	}

	/* kcf_ctx->kc_prov_desc has a hold on pd */
	KCF_PROV_REFRELE(kcf_ctx->kc_prov_desc);

	/* check if this context is shared with a software provider */
	if ((gctx->cc_flags & CRYPTO_INIT_OPSTATE) &&
	    kcf_ctx->kc_sw_prov_desc != NULL) {
		KCF_PROV_REFRELE(kcf_ctx->kc_sw_prov_desc);
	}

	kmem_cache_free(kcf_context_cache, kcf_ctx);
}

/*
 * Free the request after releasing all the holds.
 */
void
kcf_free_req(kcf_areq_node_t *areq)
{
	KCF_PROV_REFRELE(areq->an_provider);
	if (areq->an_context != NULL)
		KCF_CONTEXT_REFRELE(areq->an_context);

	if (areq->an_tried_plist != NULL)
		kcf_free_triedlist(areq->an_tried_plist);
	kmem_cache_free(kcf_areq_cache, areq);
}

/*
 * Utility routine to remove a request from the chain of requests
 * hanging off a context.
 */
void
kcf_removereq_in_ctxchain(kcf_context_t *ictx, kcf_areq_node_t *areq)
{
	kcf_areq_node_t *cur, *prev;

	/*
	 * Get the context lock, search for areq in the chain and remove it.
	 */
	ASSERT(ictx != NULL);
	mutex_enter(&ictx->kc_in_use_lock);
	prev = cur = ictx->kc_req_chain_first;

	while (cur != NULL) {
		if (cur == areq) {
			if (prev == cur) {
				if ((ictx->kc_req_chain_first =
				    cur->an_ctxchain_next) == NULL)
					ictx->kc_req_chain_last = NULL;
			} else {
				if (cur == ictx->kc_req_chain_last)
					ictx->kc_req_chain_last = prev;
				prev->an_ctxchain_next = cur->an_ctxchain_next;
			}

			break;
		}
		prev = cur;
		cur = cur->an_ctxchain_next;
	}
	mutex_exit(&ictx->kc_in_use_lock);
}

/*
 * Remove the specified node from the global software queue.
 *
 * The caller must hold the queue lock and request lock (an_lock).
 */
void
kcf_remove_node(kcf_areq_node_t *node)
{
	kcf_areq_node_t *nextp = node->an_next;
	kcf_areq_node_t *prevp = node->an_prev;

	ASSERT(mutex_owned(&gswq->gs_lock));

	if (nextp != NULL)
		nextp->an_prev = prevp;
	else
		gswq->gs_last = prevp;

	if (prevp != NULL)
		prevp->an_next = nextp;
	else
		gswq->gs_first = nextp;

	ASSERT(mutex_owned(&node->an_lock));
	node->an_state = REQ_CANCELED;
}

/*
 * Remove and return the first node in the global software queue.
 *
 * The caller must hold the queue lock.
 */
static kcf_areq_node_t *
kcf_dequeue(void)
{
	kcf_areq_node_t *tnode = NULL;

	ASSERT(mutex_owned(&gswq->gs_lock));
	if ((tnode = gswq->gs_first) == NULL) {
		return (NULL);
	} else {
		ASSERT(gswq->gs_first->an_prev == NULL);
		gswq->gs_first = tnode->an_next;
		if (tnode->an_next == NULL)
			gswq->gs_last = NULL;
		else
			tnode->an_next->an_prev = NULL;
	}

	gswq->gs_njobs--;
	return (tnode);
}

/*
 * Add the request node to the end of the global software queue.
 *
 * The caller should not hold the queue lock. Returns 0 if the
 * request is successfully queued. Returns CRYPTO_BUSY if the limit
 * on the number of jobs is exceeded.
 */
static int
kcf_enqueue(kcf_areq_node_t *node)
{
	kcf_areq_node_t *tnode;

	mutex_enter(&gswq->gs_lock);

	if (gswq->gs_njobs >= gswq->gs_maxjobs) {
		mutex_exit(&gswq->gs_lock);
		return (CRYPTO_BUSY);
	}

	if (gswq->gs_last == NULL) {
		gswq->gs_first = gswq->gs_last = node;
	} else {
		ASSERT(gswq->gs_last->an_next == NULL);
		tnode = gswq->gs_last;
		tnode->an_next = node;
		gswq->gs_last = node;
		node->an_prev = tnode;
	}

	gswq->gs_njobs++;

	/* an_lock not needed here as we hold gs_lock */
	node->an_state = REQ_WAITING;

	mutex_exit(&gswq->gs_lock);

	return (0);
}

/*
 * Decrement the thread pool count and signal the failover
 * thread if we are the last one out.
 */
static void
kcf_decrcnt_andsignal(void)
{
	KCF_ATOMIC_DECR(kcfpool->kp_threads);

	mutex_enter(&kcfpool->kp_thread_lock);
	if (kcfpool->kp_threads == 0)
		cv_signal(&kcfpool->kp_nothr_cv);
	mutex_exit(&kcfpool->kp_thread_lock);
}
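
/*
 * Editorial sketch of the pool protocol (inferred from this file, not
 * in the original source): worker LWPs are created in the kcfd daemon
 * and each one enters the kernel via ioctl(CRYPTO_POOL_RUN, ...),
 * which lands in kcf_svc_do_run() below and loops servicing gswq. The
 * single "creator" LWP blocks in ioctl(CRYPTO_POOL_WAIT, ...) --
 * kcf_svc_wait() further down -- and returns to userland with a thread
 * count whenever kcf_disp_sw_request() asks for more workers.
 */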

/*
 * Function run by a thread from kcfpool to work on the global software
 * queue. It is called from ioctl(CRYPTO_POOL_RUN, ...).
 */
int
kcf_svc_do_run(void)
{
	int error = 0;
	clock_t rv;
	clock_t timeout_val;
	kcf_areq_node_t *req;
	kcf_context_t *ictx;
	kcf_provider_desc_t *pd;

	KCF_ATOMIC_INCR(kcfpool->kp_threads);

	for (;;) {
		mutex_enter(&gswq->gs_lock);

		while ((req = kcf_dequeue()) == NULL) {
			timeout_val = ddi_get_lbolt() +
			    drv_usectohz(kcf_idlethr_timeout);

			KCF_ATOMIC_INCR(kcfpool->kp_idlethreads);
			rv = cv_timedwait_sig(&gswq->gs_cv, &gswq->gs_lock,
			    timeout_val);
			KCF_ATOMIC_DECR(kcfpool->kp_idlethreads);

			switch (rv) {
			case 0:
				/*
				 * A signal (as in kill(2)) is pending. We did
				 * not get any cv_signal().
				 */
				kcf_decrcnt_andsignal();
				mutex_exit(&gswq->gs_lock);
				return (EINTR);

			case -1:
				/*
				 * Timed out and we are not signaled. Let us
				 * see if this thread should exit. We should
				 * keep at least kcf_minthreads.
				 */
				if (kcfpool->kp_threads > kcf_minthreads) {
					kcf_decrcnt_andsignal();
					mutex_exit(&gswq->gs_lock);
					return (0);
				}

				/* Resume the wait for work */
				break;

			default:
				/*
				 * We are signaled to work on the queue.
				 */
				break;
			}
		}

		mutex_exit(&gswq->gs_lock);

		ictx = req->an_context;
		if (ictx == NULL) {	/* Context-less operation */
			pd = req->an_provider;
			error = common_submit_request(pd, NULL,
			    &req->an_params, req);
			kcf_aop_done(req, error);
			continue;
		}

		/*
		 * We check if we can work on the request now.
		 * Solaris does not guarantee any order on how the threads
		 * are scheduled or how the waiters on a mutex are chosen.
		 * So, we need to maintain our own order.
		 *
		 * an_is_my_turn is set to B_TRUE initially for a request when
		 * it is enqueued and there are no other requests
		 * for that context. Note that a thread sleeping on
		 * an_turn_cv is not counted as an idle thread. This is
		 * because we define an idle thread as one that sleeps on the
		 * global queue waiting for new requests.
		 */
		mutex_enter(&req->an_lock);
		while (req->an_is_my_turn == B_FALSE) {
			KCF_ATOMIC_INCR(kcfpool->kp_blockedthreads);
			cv_wait(&req->an_turn_cv, &req->an_lock);
			KCF_ATOMIC_DECR(kcfpool->kp_blockedthreads);
		}

		req->an_state = REQ_INPROGRESS;
		mutex_exit(&req->an_lock);

		pd = ictx->kc_prov_desc;
		ASSERT(pd == req->an_provider);
		error = common_submit_request(pd, &ictx->kc_glbl_ctx,
		    &req->an_params, req);

		kcf_aop_done(req, error);
	}
}

/*
 * kmem_cache_alloc constructor for sync request structure.
 */
/* ARGSUSED */
static int
kcf_sreq_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;

	sreq->sn_type = CRYPTO_SYNCH;
	cv_init(&sreq->sn_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&sreq->sn_lock, NULL, MUTEX_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
kcf_sreq_cache_destructor(void *buf, void *cdrarg)
{
	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;

	mutex_destroy(&sreq->sn_lock);
	cv_destroy(&sreq->sn_cv);
}

/*
 * kmem_cache_alloc constructor for async request structure.
 */
/* ARGSUSED */
static int
kcf_areq_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;

	areq->an_type = CRYPTO_ASYNCH;
	areq->an_refcnt = 0;
	mutex_init(&areq->an_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&areq->an_done, NULL, CV_DEFAULT, NULL);
	cv_init(&areq->an_turn_cv, NULL, CV_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
kcf_areq_cache_destructor(void *buf, void *cdrarg)
{
	kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;

	ASSERT(areq->an_refcnt == 0);
	mutex_destroy(&areq->an_lock);
	cv_destroy(&areq->an_done);
	cv_destroy(&areq->an_turn_cv);
}

/*
 * kmem_cache_alloc constructor for kcf_context structure.
 */
/* ARGSUSED */
static int
kcf_context_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	kcf_context_t *kctx = (kcf_context_t *)buf;

	kctx->kc_refcnt = 0;
	mutex_init(&kctx->kc_in_use_lock, NULL, MUTEX_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
kcf_context_cache_destructor(void *buf, void *cdrarg)
{
	kcf_context_t *kctx = (kcf_context_t *)buf;

	ASSERT(kctx->kc_refcnt == 0);
	mutex_destroy(&kctx->kc_in_use_lock);
}

/*
 * Creates and initializes all the structures needed by the framework.
 */
void
kcf_sched_init(void)
{
	int i;
	kcf_reqid_table_t *rt;

	/*
	 * Create all the kmem caches needed by the framework. We set the
	 * align argument to 64, to get slabs aligned on a 64-byte boundary
	 * and objects (cache_chunksize) whose size is a multiple of 64
	 * bytes. This helps to avoid false sharing, as 64 bytes is the size
	 * of the CPU cache line.
	 */
	kcf_sreq_cache = kmem_cache_create("kcf_sreq_cache",
	    sizeof (struct kcf_sreq_node), 64, kcf_sreq_cache_constructor,
	    kcf_sreq_cache_destructor, NULL, NULL, NULL, 0);

	kcf_areq_cache = kmem_cache_create("kcf_areq_cache",
	    sizeof (struct kcf_areq_node), 64, kcf_areq_cache_constructor,
	    kcf_areq_cache_destructor, NULL, NULL, NULL, 0);

	kcf_context_cache = kmem_cache_create("kcf_context_cache",
	    sizeof (struct kcf_context), 64, kcf_context_cache_constructor,
	    kcf_context_cache_destructor, NULL, NULL, NULL, 0);

	mutex_init(&kcf_dh_lock, NULL, MUTEX_DEFAULT, NULL);

	gswq = kmem_alloc(sizeof (kcf_global_swq_t), KM_SLEEP);

	mutex_init(&gswq->gs_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&gswq->gs_cv, NULL, CV_DEFAULT, NULL);
	gswq->gs_njobs = 0;
	gswq->gs_maxjobs = kcf_maxthreads * crypto_taskq_maxalloc;
	gswq->gs_first = gswq->gs_last = NULL;

	/* Initialize the global reqid table */
	for (i = 0; i < REQID_TABLES; i++) {
		rt = kmem_zalloc(sizeof (kcf_reqid_table_t), KM_SLEEP);
		kcf_reqid_table[i] = rt;
		mutex_init(&rt->rt_lock, NULL, MUTEX_DEFAULT, NULL);
		rt->rt_curid = i;
	}

	/* Allocate and initialize the thread pool */
	kcfpool_alloc();

	/* Initialize the event notification list variables */
	mutex_init(&ntfy_list_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ntfy_list_cv, NULL, CV_DEFAULT, NULL);

	/* Initialize the crypto_bufcall list variables */
	mutex_init(&cbuf_list_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&cbuf_list_cv, NULL, CV_DEFAULT, NULL);

	/* Create the kcf kstat */
	kcf_misc_kstat = kstat_create("kcf", 0, "framework_stats", "crypto",
	    KSTAT_TYPE_NAMED, sizeof (kcf_stats_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (kcf_misc_kstat != NULL) {
		kcf_misc_kstat->ks_data = &kcf_ksdata;
		kcf_misc_kstat->ks_update = kcf_misc_kstat_update;
		kstat_install(kcf_misc_kstat);
	}
}

/*
 * This routine should only be called by drv/cryptoadm.
 *
 * The kcf_sched_running flag isn't protected by a lock, but we are safe
 * because the first thread ("cryptoadm refresh") calling this routine
 * during boot completes before any other thread that can call it.
 */
void
kcf_sched_start(void)
{
	if (kcf_sched_running)
		return;

	/* Start the failover kernel thread for now */
	(void) thread_create(NULL, 0, &kcf_failover_thread, 0, 0, &p0,
	    TS_RUN, minclsyspri);

	/* Start the background processing thread. */
	(void) thread_create(NULL, 0, &crypto_bufcall_service, 0, 0, &p0,
	    TS_RUN, minclsyspri);

	kcf_sched_running = B_TRUE;
}

/*
 * Signal the waiting sync client.
 */
void
kcf_sop_done(kcf_sreq_node_t *sreq, int error)
{
	mutex_enter(&sreq->sn_lock);
	sreq->sn_state = REQ_DONE;
	sreq->sn_rv = error;
	cv_signal(&sreq->sn_cv);
	mutex_exit(&sreq->sn_lock);
}
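
/*
 * Editorial note: kcf_sop_done() is the provider-completion half of the
 * synchronous handshake. The submitting thread sits in
 * kcf_submit_request() doing
 *
 *	mutex_enter(&sreq->sn_lock);
 *	while (sreq->sn_state < REQ_DONE)
 *		cv_wait(&sreq->sn_cv, &sreq->sn_lock);
 *	mutex_exit(&sreq->sn_lock);
 *
 * so setting sn_state under sn_lock before the cv_signal() above is
 * what makes the wakeup race-free.
 */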

/*
 * Call back the async client with the operation status.
 * We free the async request node and possibly the context.
 * We also handle any chain of requests hanging off of
 * the context.
 */
void
kcf_aop_done(kcf_areq_node_t *areq, int error)
{
	kcf_op_type_t optype;
	boolean_t skip_notify = B_FALSE;
	kcf_context_t *ictx;
	kcf_areq_node_t *nextreq;

	/*
	 * Handle recoverable errors. This has to be done first,
	 * before doing anything else in this routine, so that
	 * we do not change the state of the request.
	 */
	if (error != CRYPTO_SUCCESS && IS_RECOVERABLE(error)) {
		/*
		 * We try another provider, if one is available. Else
		 * we continue with the failure notification to the
		 * client.
		 */
		if (kcf_resubmit_request(areq) == CRYPTO_QUEUED)
			return;
	}

	mutex_enter(&areq->an_lock);
	areq->an_state = REQ_DONE;
	mutex_exit(&areq->an_lock);

	optype = areq->an_params.rp_optype;
	if ((ictx = areq->an_context) != NULL) {
		/*
		 * After a request is removed from the request queue,
		 * it still stays on the chain of requests hanging off
		 * its context structure. It needs to be removed from
		 * this chain at this point.
		 */
		mutex_enter(&ictx->kc_in_use_lock);
		nextreq = areq->an_ctxchain_next;
		if (nextreq != NULL) {
			mutex_enter(&nextreq->an_lock);
			nextreq->an_is_my_turn = B_TRUE;
			cv_signal(&nextreq->an_turn_cv);
			mutex_exit(&nextreq->an_lock);
		}

		ictx->kc_req_chain_first = nextreq;
		if (nextreq == NULL)
			ictx->kc_req_chain_last = NULL;
		mutex_exit(&ictx->kc_in_use_lock);

		if (IS_SINGLE_OP(optype) || IS_FINAL_OP(optype)) {
			ASSERT(nextreq == NULL);
			KCF_CONTEXT_REFRELE(ictx);
		} else if (error != CRYPTO_SUCCESS && IS_INIT_OP(optype)) {
			/*
			 * NOTE - We do not release the context in case of
			 * update operations. We require the consumer to
			 * free it explicitly, in case it wants to abandon
			 * an update operation. This is done as there may
			 * be mechanisms in ECB mode that can continue even
			 * if an operation on a block fails.
			 */
			KCF_CONTEXT_REFRELE(ictx);
		}
	}

	/* Deal with the internal continuation of this request first */
	if (areq->an_isdual) {
		kcf_dual_req_t *next_arg;
		next_arg = (kcf_dual_req_t *)areq->an_reqarg.cr_callback_arg;
		next_arg->kr_areq = areq;
		KCF_AREQ_REFHOLD(areq);
		areq->an_isdual = B_FALSE;

		NOTIFY_CLIENT(areq, error);
		return;
	}

	/*
	 * If the CRYPTO_NOTIFY_OPDONE flag is set, we should always
	 * notify. If this flag is clear, we skip the notification
	 * provided there are no errors. We check this flag only for
	 * init or update operations. It is ignored for single, final
	 * or atomic operations.
	 */
	skip_notify = (IS_UPDATE_OP(optype) || IS_INIT_OP(optype)) &&
	    (!(areq->an_reqarg.cr_flag & CRYPTO_NOTIFY_OPDONE)) &&
	    (error == CRYPTO_SUCCESS);

	if (!skip_notify) {
		NOTIFY_CLIENT(areq, error);
	}

	if (!(areq->an_reqarg.cr_flag & CRYPTO_SKIP_REQID))
		kcf_reqid_delete(areq);

	KCF_AREQ_REFRELE(areq);
}
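
/*
 * Illustrative note (added by the editor, not in the original source):
 * a consumer that wants a callback after every update step of a
 * multi-part operation sets CRYPTO_NOTIFY_OPDONE in its call request,
 * e.g.
 *
 *	crypto_call_req_t cr;
 *	cr.cr_flag |= CRYPTO_NOTIFY_OPDONE;
 *
 * Otherwise, per the skip_notify logic above, successful init/update
 * steps complete silently and only the final/single/atomic step (or an
 * error) notifies the client.
 */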

/*
 * Allocate the thread pool and initialize all its fields.
 */
static void
kcfpool_alloc(void)
{
	kcfpool = kmem_alloc(sizeof (kcf_pool_t), KM_SLEEP);

	kcfpool->kp_threads = kcfpool->kp_idlethreads = 0;
	kcfpool->kp_blockedthreads = 0;
	kcfpool->kp_signal_create_thread = B_FALSE;
	kcfpool->kp_nthrs = 0;
	kcfpool->kp_user_waiting = B_FALSE;

	mutex_init(&kcfpool->kp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&kcfpool->kp_nothr_cv, NULL, CV_DEFAULT, NULL);

	mutex_init(&kcfpool->kp_user_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&kcfpool->kp_user_cv, NULL, CV_DEFAULT, NULL);

	kcf_idlethr_timeout = KCF_DEFAULT_THRTIMEOUT;
}

/*
 * This function is run by the 'creator' thread in the pool.
 * It is called from ioctl(CRYPTO_POOL_WAIT, ...).
 */
int
kcf_svc_wait(int *nthrs)
{
	clock_t rv;
	clock_t timeout_val;

	if (kcfpool == NULL)
		return (ENOENT);

	mutex_enter(&kcfpool->kp_user_lock);
	/* Check if there's already a user thread waiting on this kcfpool */
	if (kcfpool->kp_user_waiting) {
		mutex_exit(&kcfpool->kp_user_lock);
		*nthrs = 0;
		return (EBUSY);
	}

	kcfpool->kp_user_waiting = B_TRUE;

	/* Go to sleep, waiting for the signaled flag. */
	while (!kcfpool->kp_signal_create_thread) {
		timeout_val = ddi_get_lbolt() +
		    drv_usectohz(kcf_idlethr_timeout);

		rv = cv_timedwait_sig(&kcfpool->kp_user_cv,
		    &kcfpool->kp_user_lock, timeout_val);
		switch (rv) {
		case 0:
			/* Interrupted, return to handle exit or signal */
			kcfpool->kp_user_waiting = B_FALSE;
			kcfpool->kp_signal_create_thread = B_FALSE;
			mutex_exit(&kcfpool->kp_user_lock);
			/*
			 * kcfd is exiting. Release the door and
			 * invalidate it.
			 */
			mutex_enter(&kcf_dh_lock);
			if (kcf_dh != NULL) {
				door_ki_rele(kcf_dh);
				kcf_dh = NULL;
			}
			mutex_exit(&kcf_dh_lock);
			return (EINTR);

		case -1:
			/* Timed out. Recalculate the min/max threads */
			compute_min_max_threads();
			break;

		default:
			/* Worker thread did a cv_signal() */
			break;
		}
	}

	kcfpool->kp_signal_create_thread = B_FALSE;
	kcfpool->kp_user_waiting = B_FALSE;

	*nthrs = kcfpool->kp_nthrs;
	mutex_exit(&kcfpool->kp_user_lock);

	/* Return to userland for possible thread creation. */
	return (0);
}


/*
 * This routine introduces a locking order of gswq->gs_lock followed
 * by cpu_lock. This means that no consumer of the k-api should hold
 * cpu_lock when calling k-api routines.
 */
static void
compute_min_max_threads(void)
{
	mutex_enter(&gswq->gs_lock);
	mutex_enter(&cpu_lock);
	kcf_minthreads = curthread->t_cpupart->cp_ncpus;
	mutex_exit(&cpu_lock);
	kcf_maxthreads = kcf_thr_multiple * kcf_minthreads;
	gswq->gs_maxjobs = kcf_maxthreads * crypto_taskq_maxalloc;
	mutex_exit(&gswq->gs_lock);
}
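
/*
 * Editorial note (inferred from this file): compute_min_max_threads()
 * is reached only from the kcf_svc_wait() timeout path above, so the
 * pool limits are refreshed roughly once per idle-timeout interval and
 * track the number of CPUs in the caller's partition (cp_ncpus).
 * gs_maxjobs is scaled along with kcf_maxthreads so the queue-depth
 * limit follows the pool size.
 */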

/*
 * This is the main routine of the failover kernel thread.
 * If there are any threads in the pool, we sleep. The last thread in
 * the pool to exit will signal us to get to work. We go back to sleep
 * once we detect that the pool has threads.
 *
 * Note that in the hand-off from us to a pool thread we get to run
 * once. Since this hand-off is a rare event, this should be fine.
 */
static void
kcf_failover_thread(void)
{
	int error = 0;
	kcf_context_t *ictx;
	kcf_areq_node_t *req;
	callb_cpr_t cpr_info;
	kmutex_t cpr_lock;
	static boolean_t is_logged = B_FALSE;

	mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
	CALLB_CPR_INIT(&cpr_info, &cpr_lock, callb_generic_cpr,
	    "kcf_failover_thread");

	for (;;) {
		/*
		 * Wait if there are any threads in the pool.
		 */
		if (kcfpool->kp_threads > 0) {
			mutex_enter(&cpr_lock);
			CALLB_CPR_SAFE_BEGIN(&cpr_info);
			mutex_exit(&cpr_lock);

			mutex_enter(&kcfpool->kp_thread_lock);
			cv_wait(&kcfpool->kp_nothr_cv,
			    &kcfpool->kp_thread_lock);
			mutex_exit(&kcfpool->kp_thread_lock);

			mutex_enter(&cpr_lock);
			CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
			mutex_exit(&cpr_lock);
			is_logged = B_FALSE;
		}

		/*
		 * Get the requests from the queue and wait if needed.
		 */
		mutex_enter(&gswq->gs_lock);

		while ((req = kcf_dequeue()) == NULL) {
			mutex_enter(&cpr_lock);
			CALLB_CPR_SAFE_BEGIN(&cpr_info);
			mutex_exit(&cpr_lock);

			KCF_ATOMIC_INCR(kcfpool->kp_idlethreads);
			cv_wait(&gswq->gs_cv, &gswq->gs_lock);
			KCF_ATOMIC_DECR(kcfpool->kp_idlethreads);

			mutex_enter(&cpr_lock);
			CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
			mutex_exit(&cpr_lock);
		}

		mutex_exit(&gswq->gs_lock);

		/*
		 * We check kp_threads since kcfd could have started
		 * while we were waiting on the global software queue.
		 */
		if (kcfpool->kp_threads <= 0 && !is_logged) {
			cmn_err(CE_WARN, "kcfd is not running. Please check "
			    "and restart kcfd. Using the failover kernel "
			    "thread for now.\n");
			is_logged = B_TRUE;
		}

		/*
		 * Get to work on the request.
		 */
		ictx = req->an_context;
		mutex_enter(&req->an_lock);
		req->an_state = REQ_INPROGRESS;
		mutex_exit(&req->an_lock);

		error = common_submit_request(req->an_provider, ictx ?
		    &ictx->kc_glbl_ctx : NULL, &req->an_params, req);

		kcf_aop_done(req, error);
	}
}

/*
 * Insert the async request in the hash table after assigning it
 * an ID. Returns the ID.
 *
 * The ID is used by the caller to pass as an argument to a
 * cancel_req() routine later.
 */
static crypto_req_id_t
kcf_reqid_insert(kcf_areq_node_t *areq)
{
	int indx;
	crypto_req_id_t id;
	kcf_areq_node_t *headp;
	kcf_reqid_table_t *rt =
	    kcf_reqid_table[CPU->cpu_seqid & REQID_TABLE_MASK];

	mutex_enter(&rt->rt_lock);

	rt->rt_curid = id =
	    (rt->rt_curid - REQID_COUNTER_LOW) | REQID_COUNTER_HIGH;
	SET_REQID(areq, id);
	indx = REQID_HASH(id);
	headp = areq->an_idnext = rt->rt_idhash[indx];
	areq->an_idprev = NULL;
	if (headp != NULL)
		headp->an_idprev = areq;

	rt->rt_idhash[indx] = areq;
	mutex_exit(&rt->rt_lock);

	return (id);
}
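
/*
 * Editorial note (inferred, not in the original source): the counter
 * above is advanced in the high bits so that the low bits of every ID
 * issued from a table still encode the table index (rt_curid was
 * seeded with it in kcf_sched_init()). That is why kcf_reqid_delete()
 * and crypto_cancel_req() can recover the table with
 * kcf_reqid_table[id & REQID_TABLE_MASK], and why REQID_COUNTER_HIGH
 * keeps a live ID distinct from the 0 stored by SET_REQID() on
 * deletion.
 */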

/*
 * Delete the async request from the hash table.
 */
static void
kcf_reqid_delete(kcf_areq_node_t *areq)
{
	int indx;
	kcf_areq_node_t *nextp, *prevp;
	crypto_req_id_t id = GET_REQID(areq);
	kcf_reqid_table_t *rt;

	rt = kcf_reqid_table[id & REQID_TABLE_MASK];
	indx = REQID_HASH(id);

	mutex_enter(&rt->rt_lock);

	nextp = areq->an_idnext;
	prevp = areq->an_idprev;
	if (nextp != NULL)
		nextp->an_idprev = prevp;
	if (prevp != NULL)
		prevp->an_idnext = nextp;
	else
		rt->rt_idhash[indx] = nextp;

	SET_REQID(areq, 0);
	cv_broadcast(&areq->an_done);

	mutex_exit(&rt->rt_lock);
}

/*
 * Cancel a single asynchronous request.
 *
 * We guarantee that no problems will result from calling
 * crypto_cancel_req() for a request which is either running, or
 * has already completed. We remove the request from any queues
 * if it is possible. We wait for request completion if the
 * request is dispatched to a provider.
 *
 * Calling context:
 *	Can be called from user context only.
 *
 * NOTE: We acquire the following locks in this routine (in order):
 *	- rt_lock (kcf_reqid_table_t)
 *	- gswq->gs_lock
 *	- areq->an_lock
 *	- ictx->kc_in_use_lock (from kcf_removereq_in_ctxchain())
 *
 * This locking order MUST be maintained in code everywhere else.
 */
void
crypto_cancel_req(crypto_req_id_t id)
{
	int indx;
	kcf_areq_node_t *areq;
	kcf_provider_desc_t *pd;
	kcf_context_t *ictx;
	kcf_reqid_table_t *rt;

	rt = kcf_reqid_table[id & REQID_TABLE_MASK];
	indx = REQID_HASH(id);

	mutex_enter(&rt->rt_lock);
	for (areq = rt->rt_idhash[indx]; areq; areq = areq->an_idnext) {
		if (GET_REQID(areq) == id) {
			/*
			 * We found the request. It is either still waiting
			 * in the framework queues or running at the provider.
			 */
			pd = areq->an_provider;
			ASSERT(pd != NULL);

			switch (pd->pd_prov_type) {
			case CRYPTO_SW_PROVIDER:
				mutex_enter(&gswq->gs_lock);
				mutex_enter(&areq->an_lock);

				/* This request can be safely canceled. */
				if (areq->an_state <= REQ_WAITING) {
					/* Remove from the global software
					 * queue, gswq. */
					kcf_remove_node(areq);
					if ((ictx = areq->an_context) != NULL)
						kcf_removereq_in_ctxchain(
						    ictx, areq);

					mutex_exit(&areq->an_lock);
					mutex_exit(&gswq->gs_lock);
					mutex_exit(&rt->rt_lock);

					/*
					 * Remove areq from the hash table
					 * and free it.
					 */
					kcf_reqid_delete(areq);
					KCF_AREQ_REFRELE(areq);
					return;
				}

				mutex_exit(&areq->an_lock);
				mutex_exit(&gswq->gs_lock);
				break;

			case CRYPTO_HW_PROVIDER:
				/*
				 * There is no interface to remove an entry
				 * once it is on the taskq. So, we do not do
				 * anything for a hardware provider.
				 */
				break;
			}

			/*
			 * The request is running. Wait for its completion
			 * to notify us.
			 */
			KCF_AREQ_REFHOLD(areq);
			while (GET_REQID(areq) == id)
				cv_wait(&areq->an_done, &rt->rt_lock);
			KCF_AREQ_REFRELE(areq);
			break;
		}
	}

	mutex_exit(&rt->rt_lock);
}
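
/*
 * Illustrative consumer usage of crypto_cancel_req() (editor's sketch,
 * not from the original source; my_done/my_arg are hypothetical and
 * error handling is omitted):
 *
 *	crypto_call_req_t cr;
 *
 *	cr.cr_flag = 0;
 *	cr.cr_callback_func = my_done;
 *	cr.cr_callback_arg = my_arg;
 *	if (crypto_encrypt(&mech, &pt, &key, tmpl, &ct, &cr) ==
 *	    CRYPTO_QUEUED)
 *		crypto_cancel_req(cr.cr_reqid);
 *
 * cr_reqid is valid only while the request is outstanding, and only if
 * CRYPTO_SKIP_REQID was not set at submission time.
 */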

/*
 * Cancel all asynchronous requests associated with the
 * passed-in crypto context and free it.
 *
 * A client SHOULD NOT call this routine after calling a crypto_*_final
 * routine. This routine is called only during intermediate operations.
 * The client should not use the crypto context after this function
 * returns, since we destroy it.
 *
 * Calling context:
 *	Can be called from user context only.
 */
void
crypto_cancel_ctx(crypto_context_t ctx)
{
	kcf_context_t *ictx;
	kcf_areq_node_t *areq;

	if (ctx == NULL)
		return;

	ictx = (kcf_context_t *)((crypto_ctx_t *)ctx)->cc_framework_private;

	mutex_enter(&ictx->kc_in_use_lock);

	/* Walk the chain and cancel each request */
	while ((areq = ictx->kc_req_chain_first) != NULL) {
		/*
		 * We have to drop the lock here as we may have
		 * to wait for request completion. We hold the
		 * request before dropping the lock, though, so that it
		 * won't be freed underneath us.
		 */
		KCF_AREQ_REFHOLD(areq);
		mutex_exit(&ictx->kc_in_use_lock);

		crypto_cancel_req(GET_REQID(areq));
		KCF_AREQ_REFRELE(areq);

		mutex_enter(&ictx->kc_in_use_lock);
	}

	mutex_exit(&ictx->kc_in_use_lock);
	KCF_CONTEXT_REFRELE(ictx);
}

/*
 * Update kstats.
 */
static int
kcf_misc_kstat_update(kstat_t *ksp, int rw)
{
	uint_t tcnt;
	kcf_stats_t *ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ks_data = ksp->ks_data;

	ks_data->ks_thrs_in_pool.value.ui32 = kcfpool->kp_threads;
	/*
	 * The failover thread is counted in kp_idlethreads in
	 * some corner cases. This is done to avoid doing more checks
	 * when submitting a request. We account for those cases below.
	 */
	if ((tcnt = kcfpool->kp_idlethreads) == (kcfpool->kp_threads + 1))
		tcnt--;
	ks_data->ks_idle_thrs.value.ui32 = tcnt;
	ks_data->ks_minthrs.value.ui32 = kcf_minthreads;
	ks_data->ks_maxthrs.value.ui32 = kcf_maxthreads;
	ks_data->ks_swq_njobs.value.ui32 = gswq->gs_njobs;
	ks_data->ks_swq_maxjobs.value.ui32 = gswq->gs_maxjobs;
	ks_data->ks_taskq_threads.value.ui32 = crypto_taskq_threads;
	ks_data->ks_taskq_minalloc.value.ui32 = crypto_taskq_minalloc;
	ks_data->ks_taskq_maxalloc.value.ui32 = crypto_taskq_maxalloc;

	return (0);
}

/*
 * Allocate and initialize a kcf_dual_req, used for saving the arguments
 * of a dual operation or an atomic operation that has to be internally
 * simulated with multiple single steps.
 * crq determines the memory allocation flags.
 */
kcf_dual_req_t *
kcf_alloc_req(crypto_call_req_t *crq)
{
	kcf_dual_req_t *kcr;

	kcr = kmem_alloc(sizeof (kcf_dual_req_t), KCF_KMFLAG(crq));

	if (kcr == NULL)
		return (NULL);

	/* Copy the whole crypto_call_req struct, as it isn't persistent */
	if (crq != NULL)
		kcr->kr_callreq = *crq;
	else
		bzero(&(kcr->kr_callreq), sizeof (crypto_call_req_t));
	kcr->kr_areq = NULL;
	kcr->kr_saveoffset = 0;
	kcr->kr_savelen = 0;

	return (kcr);
}
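
/*
 * Editorial note: KCF_KMFLAG(crq) is assumed (per sched_impl.h) to map
 * a NULL call request to KM_SLEEP and a non-NULL one to KM_NOSLEEP,
 * mirroring the kmem_cache_alloc() flag choice made in kcf_new_ctx().
 * Callers of kcf_alloc_req() must therefore be prepared for a NULL
 * return whenever they pass a non-NULL crq.
 */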

/*
 * Callback routine for the next part of a simulated dual part.
 * Schedules the next step.
 *
 * This routine can be called from interrupt context.
 */
void
kcf_next_req(void *next_req_arg, int status)
{
	kcf_dual_req_t *next_req = (kcf_dual_req_t *)next_req_arg;
	kcf_req_params_t *params = &(next_req->kr_params);
	kcf_areq_node_t *areq = next_req->kr_areq;
	int error = status;
	kcf_provider_desc_t *pd;
	crypto_dual_data_t *ct;

	/* Stop the processing if an error occurred at this step */
	if (error != CRYPTO_SUCCESS) {
out:
		areq->an_reqarg = next_req->kr_callreq;
		KCF_AREQ_REFRELE(areq);
		kmem_free(next_req, sizeof (kcf_dual_req_t));
		areq->an_isdual = B_FALSE;
		kcf_aop_done(areq, error);
		return;
	}

	switch (params->rp_opgrp) {
	case KCF_OG_MAC: {

		/*
		 * The next req is submitted with the same reqid as the
		 * first part. The consumer only got back that reqid, and
		 * should still be able to cancel the operation during its
		 * second step.
		 */
		kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
		crypto_ctx_template_t mac_tmpl;
		kcf_mech_entry_t *me;

		ct = (crypto_dual_data_t *)mops->mo_data;
		mac_tmpl = (crypto_ctx_template_t)mops->mo_templ;

		/* No expected recoverable failures, so no retry list */
		pd = kcf_get_mech_provider(mops->mo_framework_mechtype,
		    &me, &error, NULL, CRYPTO_FG_MAC_ATOMIC,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED),
		    ct->dd_len2);

		if (pd == NULL) {
			error = CRYPTO_MECH_NOT_SUPPORTED;
			goto out;
		}
		/* Validate the MAC context template here */
		if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
		    (mac_tmpl != NULL)) {
			kcf_ctx_template_t *ctx_mac_tmpl;

			ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;

			if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
				KCF_PROV_REFRELE(pd);
				error = CRYPTO_OLD_CTX_TEMPLATE;
				goto out;
			}
			mops->mo_templ = ctx_mac_tmpl->ct_prov_tmpl;
		}

		break;
	}
	case KCF_OG_DECRYPT: {
		kcf_decrypt_ops_params_t *dcrops =
		    &(params->rp_u.decrypt_params);

		ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
		/* No expected recoverable failures, so no retry list */
		pd = kcf_get_mech_provider(dcrops->dop_framework_mechtype,
		    NULL, &error, NULL, CRYPTO_FG_DECRYPT_ATOMIC,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED),
		    ct->dd_len1);

		if (pd == NULL) {
			error = CRYPTO_MECH_NOT_SUPPORTED;
			goto out;
		}
		break;
	}
	}

	/* The second step uses len2 and offset2 of the dual_data */
	next_req->kr_saveoffset = ct->dd_offset1;
	next_req->kr_savelen = ct->dd_len1;
	ct->dd_offset1 = ct->dd_offset2;
	ct->dd_len1 = ct->dd_len2;

	/* preserve if the caller is restricted */
	if (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED) {
		areq->an_reqarg.cr_flag = CRYPTO_RESTRICTED;
	} else {
		areq->an_reqarg.cr_flag = 0;
	}

	areq->an_reqarg.cr_callback_func = kcf_last_req;
	areq->an_reqarg.cr_callback_arg = next_req;
	areq->an_isdual = B_TRUE;

	/*
	 * We would like to call kcf_submit_request() here. But, that
	 * is not possible as that routine allocates a new
	 * kcf_areq_node_t request structure, while we need to
	 * reuse the existing request structure.
	 */
	switch (pd->pd_prov_type) {
	case CRYPTO_SW_PROVIDER:
		error = common_submit_request(pd, NULL, params,
		    KCF_RHNDL(KM_NOSLEEP));
		break;

	case CRYPTO_HW_PROVIDER: {
		kcf_provider_desc_t *old_pd;
		taskq_t *taskq = pd->pd_sched_info.ks_taskq;

		/*
		 * Set the params for the second step in the
		 * dual-ops.
		 */
		areq->an_params = *params;
		old_pd = areq->an_provider;
		KCF_PROV_REFRELE(old_pd);
		KCF_PROV_REFHOLD(pd);
		areq->an_provider = pd;

		/*
		 * Note that we have to do a taskq_dispatch()
		 * here as we may be in interrupt context.
		 */
		if (taskq_dispatch(taskq, process_req_hwp, areq,
		    TQ_NOSLEEP) == (taskqid_t)0) {
			error = CRYPTO_HOST_MEMORY;
		} else {
			error = CRYPTO_QUEUED;
		}
		break;
	}
	}

	/*
	 * We have to release the holds on the request and the provider
	 * in all cases.
	 */
	KCF_AREQ_REFRELE(areq);
	KCF_PROV_REFRELE(pd);

	if (error != CRYPTO_QUEUED) {
		/* restore, clean up, and invoke the client's callback */
		ct->dd_offset1 = next_req->kr_saveoffset;
		ct->dd_len1 = next_req->kr_savelen;
		areq->an_reqarg = next_req->kr_callreq;
		kmem_free(next_req, sizeof (kcf_dual_req_t));
		areq->an_isdual = B_FALSE;
		kcf_aop_done(areq, error);
	}
}

/*
 * Last part of an emulated dual operation.
 * Clean up and restore ...
 */
void
kcf_last_req(void *last_req_arg, int status)
{
	kcf_dual_req_t *last_req = (kcf_dual_req_t *)last_req_arg;

	kcf_req_params_t *params = &(last_req->kr_params);
	kcf_areq_node_t *areq = last_req->kr_areq;
	crypto_dual_data_t *ct;

	switch (params->rp_opgrp) {
	case KCF_OG_MAC: {
		kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);

		ct = (crypto_dual_data_t *)mops->mo_data;
		break;
	}
	case KCF_OG_DECRYPT: {
		kcf_decrypt_ops_params_t *dcrops =
		    &(params->rp_u.decrypt_params);

		ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
		break;
	}
	}
	ct->dd_offset1 = last_req->kr_saveoffset;
	ct->dd_len1 = last_req->kr_savelen;

	/* The submitter used kcf_last_req as its callback */
	if (areq == NULL) {
		crypto_call_req_t *cr = &last_req->kr_callreq;

		(*(cr->cr_callback_func))(cr->cr_callback_arg, status);
		kmem_free(last_req, sizeof (kcf_dual_req_t));
		return;
	}
	areq->an_reqarg = last_req->kr_callreq;
	KCF_AREQ_REFRELE(areq);
	kmem_free(last_req, sizeof (kcf_dual_req_t));
	areq->an_isdual = B_FALSE;
	kcf_aop_done(areq, status);
}