/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

/*
 * This file contains the core framework routines for the
 * kernel cryptographic framework. These routines sit at the
 * layer between the kernel API/ioctls and the SPI.
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/cpupart.h>
#include <sys/ksynch.h>
#include <sys/callb.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/kstat.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/taskq_impl.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>


kcf_global_swq_t *gswq;	/* Global software queue */

/* Thread pool related variables */
static kcf_pool_t *kcfpool;	/* Thread pool of kcfd LWPs */
int kcf_maxthreads;
int kcf_minthreads;
int kcf_thr_multiple = 2;	/* Boot-time tunable for experimentation */
static ulong_t kcf_idlethr_timeout;
static boolean_t kcf_sched_running = B_FALSE;
#define	KCF_DEFAULT_THRTIMEOUT	60000000	/* 60 seconds */

/* kmem caches used by the scheduler */
static struct kmem_cache *kcf_sreq_cache;
static struct kmem_cache *kcf_areq_cache;
static struct kmem_cache *kcf_context_cache;

/* Global request ID table */
static kcf_reqid_table_t *kcf_reqid_table[REQID_TABLES];

/* KCF stats. Not protected. */
static kcf_stats_t kcf_ksdata = {
	{ "total threads in pool", KSTAT_DATA_UINT32},
	{ "idle threads in pool", KSTAT_DATA_UINT32},
	{ "min threads in pool", KSTAT_DATA_UINT32},
	{ "max threads in pool", KSTAT_DATA_UINT32},
	{ "requests in gswq", KSTAT_DATA_UINT32},
	{ "max requests in gswq", KSTAT_DATA_UINT32},
	{ "minalloc for taskq", KSTAT_DATA_UINT32},
	{ "maxalloc for taskq", KSTAT_DATA_UINT32}
};

static kstat_t *kcf_misc_kstat = NULL;
ulong_t kcf_swprov_hndl = 0;

static kcf_areq_node_t *kcf_areqnode_alloc(kcf_provider_desc_t *,
    kcf_context_t *, crypto_call_req_t *, kcf_req_params_t *, boolean_t);
static int kcf_disp_sw_request(kcf_areq_node_t *);
static void process_req_hwp(void *);
static kcf_areq_node_t *kcf_dequeue();
static int kcf_enqueue(kcf_areq_node_t *);
static void kcf_failover_thread();
static void kcfpool_alloc();
static void kcf_reqid_delete(kcf_areq_node_t *areq);
static crypto_req_id_t kcf_reqid_insert(kcf_areq_node_t *areq);
static int kcf_misc_kstat_update(kstat_t *ksp, int rw);
static void compute_min_max_threads();


/*
 * Create a new context.
 */
crypto_ctx_t *
kcf_new_ctx(crypto_call_req_t *crq, kcf_provider_desc_t *pd,
    crypto_session_id_t sid)
{
	crypto_ctx_t *ctx;
	kcf_context_t *kcf_ctx;

	kcf_ctx = kmem_cache_alloc(kcf_context_cache,
	    (crq == NULL) ? KM_SLEEP : KM_NOSLEEP);
	if (kcf_ctx == NULL)
		return (NULL);

	/* initialize the context for the consumer */
	kcf_ctx->kc_refcnt = 1;
	kcf_ctx->kc_need_signal = B_FALSE;
	kcf_ctx->kc_req_chain_first = NULL;
	kcf_ctx->kc_req_chain_last = NULL;
	kcf_ctx->kc_secondctx = NULL;
	KCF_PROV_REFHOLD(pd);
	kcf_ctx->kc_prov_desc = pd;
	kcf_ctx->kc_sw_prov_desc = NULL;
	kcf_ctx->kc_mech = NULL;

	ctx = &kcf_ctx->kc_glbl_ctx;
	ctx->cc_provider = pd->pd_prov_handle;
	ctx->cc_session = sid;
	ctx->cc_provider_private = NULL;
	ctx->cc_framework_private = (void *)kcf_ctx;
	ctx->cc_flags = 0;
	ctx->cc_opstate = NULL;

	return (ctx);
}
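/*
 * Illustrative sketch (not part of the original source): a framework
 * caller that already holds a provider descriptor would typically pair
 * kcf_new_ctx() with a KCF_CONTEXT_REFRELE() on the embedded
 * kcf_context_t. The names "pd", "crq" and the use of pd->pd_sid below
 * are assumptions for illustration only.
 *
 *	crypto_ctx_t *ctx;
 *
 *	ctx = kcf_new_ctx(crq, pd, pd->pd_sid);
 *	if (ctx == NULL)
 *		return (CRYPTO_HOST_MEMORY);
 *	...
 *	KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private);
 */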
/*
 * Allocate a new async request node.
 *
 * ictx - Framework private context pointer
 * crq - Has callback function and argument. Should be non NULL.
 * req - The parameters to pass to the SPI
 */
static kcf_areq_node_t *
kcf_areqnode_alloc(kcf_provider_desc_t *pd, kcf_context_t *ictx,
    crypto_call_req_t *crq, kcf_req_params_t *req, boolean_t isdual)
{
	kcf_areq_node_t *arptr, *areq;

	ASSERT(crq != NULL);
	arptr = kmem_cache_alloc(kcf_areq_cache, KM_NOSLEEP);
	if (arptr == NULL)
		return (NULL);

	arptr->an_state = REQ_ALLOCATED;
	arptr->an_reqarg = *crq;
	arptr->an_params = *req;
	arptr->an_context = ictx;
	arptr->an_isdual = isdual;

	arptr->an_next = arptr->an_prev = NULL;
	KCF_PROV_REFHOLD(pd);
	arptr->an_provider = pd;
	arptr->an_tried_plist = NULL;
	arptr->an_refcnt = 1;
	arptr->an_idnext = arptr->an_idprev = NULL;

	/*
	 * Requests for context-less operations do not use the
	 * fields - an_is_my_turn, and an_ctxchain_next.
	 */
	if (ictx == NULL)
		return (arptr);

	KCF_CONTEXT_REFHOLD(ictx);
	/*
	 * Chain this request to the context.
	 */
	mutex_enter(&ictx->kc_in_use_lock);
	arptr->an_ctxchain_next = NULL;
	if ((areq = ictx->kc_req_chain_last) == NULL) {
		arptr->an_is_my_turn = B_TRUE;
		ictx->kc_req_chain_last =
		    ictx->kc_req_chain_first = arptr;
	} else {
		ASSERT(ictx->kc_req_chain_first != NULL);
		arptr->an_is_my_turn = B_FALSE;
		/* Insert the new request to the end of the chain. */
		areq->an_ctxchain_next = arptr;
		ictx->kc_req_chain_last = arptr;
	}
	mutex_exit(&ictx->kc_in_use_lock);

	return (arptr);
}

/*
 * Queue the request node and do one of the following:
 *	- If there is an idle thread, signal it to run.
 *	- If there is no idle thread and the maximum number of running
 *	  threads has not been reached, signal the creator thread for
 *	  more threads.
 *
 * If the two conditions above are not met, we don't need to do
 * anything. The request will be picked up by one of the
 * worker threads when it becomes available.
 */
static int
kcf_disp_sw_request(kcf_areq_node_t *areq)
{
	int err;
	int cnt = 0;

	if ((err = kcf_enqueue(areq)) != 0)
		return (err);

	if (kcfpool->kp_idlethreads > 0) {
		/* Signal an idle thread to run */
		mutex_enter(&gswq->gs_lock);
		cv_signal(&gswq->gs_cv);
		mutex_exit(&gswq->gs_lock);

		return (CRYPTO_QUEUED);
	}

	/*
	 * We keep the number of running threads to be at
	 * kcf_minthreads to reduce gs_lock contention.
	 */
	cnt = kcf_minthreads -
	    (kcfpool->kp_threads - kcfpool->kp_blockedthreads);
	if (cnt > 0) {
		/*
		 * The following ensures the number of threads in pool
		 * does not exceed kcf_maxthreads.
		 */
		cnt = min(cnt, kcf_maxthreads - kcfpool->kp_threads);
		if (cnt > 0) {
			/* Signal the creator thread for more threads */
			mutex_enter(&kcfpool->kp_user_lock);
			if (!kcfpool->kp_signal_create_thread) {
				kcfpool->kp_signal_create_thread = B_TRUE;
				kcfpool->kp_nthrs = cnt;
				cv_signal(&kcfpool->kp_user_cv);
			}
			mutex_exit(&kcfpool->kp_user_lock);
		}
	}

	return (CRYPTO_QUEUED);
}
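/*
 * Worked example (illustrative, not in the original source): with
 * kcf_minthreads = 8, kp_threads = 6 and kp_blockedthreads = 2, only
 * four threads are actually runnable, so cnt = 8 - (6 - 2) = 4. If
 * kcf_maxthreads = 16, the cap min(4, 16 - 6) leaves cnt = 4, and the
 * creator thread is asked for four more threads via kp_nthrs.
 */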
/*
 * This routine is called by the taskq associated with
 * each hardware provider. We notify the kernel consumer
 * via the callback routine in case of CRYPTO_SUCCESS or
 * a failure.
 *
 * A request can be of type kcf_areq_node_t or of type
 * kcf_sreq_node_t.
 */
static void
process_req_hwp(void *ireq)
{
	int error = 0;
	crypto_ctx_t *ctx;
	kcf_call_type_t ctype;
	kcf_provider_desc_t *pd;
	kcf_areq_node_t *areq = (kcf_areq_node_t *)ireq;
	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)ireq;

	pd = ((ctype = GET_REQ_TYPE(ireq)) == CRYPTO_SYNCH) ?
	    sreq->sn_provider : areq->an_provider;

	mutex_enter(&pd->pd_lock);

	/*
	 * Wait if flow control is in effect for the provider. A
	 * CRYPTO_PROVIDER_READY or CRYPTO_PROVIDER_FAILED
	 * notification will signal us. We also get signaled if
	 * the provider is unregistering.
	 */
	while (pd->pd_state == KCF_PROV_BUSY)
		cv_wait(&pd->pd_resume_cv, &pd->pd_lock);

	/*
	 * Bump the internal reference count while the request is being
	 * processed. This is how we know when it's safe to unregister
	 * a provider. This step must precede the pd_state check below.
	 */
	KCF_PROV_IREFHOLD(pd);

	/*
	 * Fail the request if the provider has failed. We return a
	 * recoverable error and the notified clients attempt any
	 * recovery. For async clients this is done in kcf_aop_done()
	 * and for sync clients it is done in the k-api routines.
	 */
	if (pd->pd_state >= KCF_PROV_FAILED) {
		mutex_exit(&pd->pd_lock);
		error = CRYPTO_DEVICE_ERROR;
		goto bail;
	}

	mutex_exit(&pd->pd_lock);

	if (ctype == CRYPTO_SYNCH) {
		mutex_enter(&sreq->sn_lock);
		sreq->sn_state = REQ_INPROGRESS;
		mutex_exit(&sreq->sn_lock);

		ctx = sreq->sn_context ? &sreq->sn_context->kc_glbl_ctx : NULL;
		error = common_submit_request(sreq->sn_provider, ctx,
		    sreq->sn_params, sreq);
	} else {
		ASSERT(ctype == CRYPTO_ASYNCH);

		mutex_enter(&areq->an_lock);
		areq->an_state = REQ_INPROGRESS;
		mutex_exit(&areq->an_lock);

		/*
		 * We are in the per-hardware provider thread context and
		 * hence can sleep. Note that the caller would have done
		 * a taskq_dispatch(..., TQ_NOSLEEP) and would have returned.
		 */
		ctx = areq->an_context ? &areq->an_context->kc_glbl_ctx : NULL;
		error = common_submit_request(areq->an_provider, ctx,
		    &areq->an_params, areq);
	}

bail:
	if (error == CRYPTO_QUEUED) {
		/*
		 * The request is queued by the provider and we should
		 * get a crypto_op_notification() from the provider later.
		 * We notify the consumer at that time.
		 */
		return;
	} else {	/* CRYPTO_SUCCESS or other failure */
		KCF_PROV_IREFRELE(pd);
		if (ctype == CRYPTO_SYNCH)
			kcf_sop_done(sreq, error);
		else
			kcf_aop_done(areq, error);
	}
}
/*
 * This routine checks if a request can be retried on another
 * provider. If true, mech1 is initialized to point to the mechanism
 * structure. mech2 is also initialized in case of a dual operation. fg
 * is initialized to the correct crypto_func_group_t bit flag. They are
 * initialized by this routine, so that the caller can pass them to
 * kcf_get_mech_provider() or kcf_get_dual_provider() with no further change.
 *
 * We check that the request is for an init or atomic routine and that
 * it is for one of the operation groups used from the k-api.
 */
static boolean_t
can_resubmit(kcf_areq_node_t *areq, crypto_mechanism_t **mech1,
    crypto_mechanism_t **mech2, crypto_func_group_t *fg)
{
	kcf_req_params_t *params;
	kcf_op_type_t optype;

	params = &areq->an_params;
	optype = params->rp_optype;

	if (!(IS_INIT_OP(optype) || IS_ATOMIC_OP(optype)))
		return (B_FALSE);

	switch (params->rp_opgrp) {
	case KCF_OG_DIGEST: {
		kcf_digest_ops_params_t *dops = &params->rp_u.digest_params;

		dops->do_mech.cm_type = dops->do_framework_mechtype;
		*mech1 = &dops->do_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DIGEST :
		    CRYPTO_FG_DIGEST_ATOMIC;
		break;
	}

	case KCF_OG_MAC: {
		kcf_mac_ops_params_t *mops = &params->rp_u.mac_params;

		mops->mo_mech.cm_type = mops->mo_framework_mechtype;
		*mech1 = &mops->mo_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC :
		    CRYPTO_FG_MAC_ATOMIC;
		break;
	}

	case KCF_OG_SIGN: {
		kcf_sign_ops_params_t *sops = &params->rp_u.sign_params;

		sops->so_mech.cm_type = sops->so_framework_mechtype;
		*mech1 = &sops->so_mech;
		switch (optype) {
		case KCF_OP_INIT:
			*fg = CRYPTO_FG_SIGN;
			break;
		case KCF_OP_ATOMIC:
			*fg = CRYPTO_FG_SIGN_ATOMIC;
			break;
		default:
			ASSERT(optype == KCF_OP_SIGN_RECOVER_ATOMIC);
			*fg = CRYPTO_FG_SIGN_RECOVER_ATOMIC;
		}
		break;
	}

	case KCF_OG_VERIFY: {
		kcf_verify_ops_params_t *vops = &params->rp_u.verify_params;

		vops->vo_mech.cm_type = vops->vo_framework_mechtype;
		*mech1 = &vops->vo_mech;
		switch (optype) {
		case KCF_OP_INIT:
			*fg = CRYPTO_FG_VERIFY;
			break;
		case KCF_OP_ATOMIC:
			*fg = CRYPTO_FG_VERIFY_ATOMIC;
			break;
		default:
			ASSERT(optype == KCF_OP_VERIFY_RECOVER_ATOMIC);
			*fg = CRYPTO_FG_VERIFY_RECOVER_ATOMIC;
		}
		break;
	}

	case KCF_OG_ENCRYPT: {
		kcf_encrypt_ops_params_t *eops = &params->rp_u.encrypt_params;

		eops->eo_mech.cm_type = eops->eo_framework_mechtype;
		*mech1 = &eops->eo_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT :
		    CRYPTO_FG_ENCRYPT_ATOMIC;
		break;
	}

	case KCF_OG_DECRYPT: {
		kcf_decrypt_ops_params_t *dcrops = &params->rp_u.decrypt_params;

		dcrops->dop_mech.cm_type = dcrops->dop_framework_mechtype;
		*mech1 = &dcrops->dop_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DECRYPT :
		    CRYPTO_FG_DECRYPT_ATOMIC;
		break;
	}

	case KCF_OG_ENCRYPT_MAC: {
		kcf_encrypt_mac_ops_params_t *eops =
		    &params->rp_u.encrypt_mac_params;

		eops->em_encr_mech.cm_type = eops->em_framework_encr_mechtype;
		*mech1 = &eops->em_encr_mech;
		eops->em_mac_mech.cm_type = eops->em_framework_mac_mechtype;
		*mech2 = &eops->em_mac_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT_MAC :
		    CRYPTO_FG_ENCRYPT_MAC_ATOMIC;
		break;
	}

	case KCF_OG_MAC_DECRYPT: {
		kcf_mac_decrypt_ops_params_t *dops =
		    &params->rp_u.mac_decrypt_params;

		dops->md_mac_mech.cm_type = dops->md_framework_mac_mechtype;
		*mech1 = &dops->md_mac_mech;
		dops->md_decr_mech.cm_type = dops->md_framework_decr_mechtype;
		*mech2 = &dops->md_decr_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC_DECRYPT :
		    CRYPTO_FG_MAC_DECRYPT_ATOMIC;
		break;
	}

	default:
		return (B_FALSE);
	}

	return (B_TRUE);
}
/*
 * This routine is called when a request to a provider has failed
 * with a recoverable error. This routine tries to find another provider
 * and dispatches the request to the new provider, if one is available.
 * We reuse the request structure.
 *
 * A return value of NULL from kcf_get_mech_provider() indicates
 * we have tried the last provider.
 */
static int
kcf_resubmit_request(kcf_areq_node_t *areq)
{
	int error = CRYPTO_FAILED;
	kcf_context_t *ictx;
	kcf_provider_desc_t *old_pd;
	kcf_provider_desc_t *new_pd;
	crypto_mechanism_t *mech1 = NULL, *mech2 = NULL;
	crypto_mech_type_t prov_mt1, prov_mt2;
	crypto_func_group_t fg;

	if (!can_resubmit(areq, &mech1, &mech2, &fg))
		return (error);

	old_pd = areq->an_provider;
	/*
	 * Add old_pd to the list of providers already tried. We release
	 * the hold on old_pd (from the earlier kcf_get_mech_provider()) in
	 * kcf_free_triedlist().
	 */
	if (kcf_insert_triedlist(&areq->an_tried_plist, old_pd,
	    KM_NOSLEEP) == NULL)
		return (error);

	if (mech1 && !mech2) {
		new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, &error,
		    areq->an_tried_plist, fg,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
	} else {
		ASSERT(mech1 != NULL && mech2 != NULL);

		new_pd = kcf_get_dual_provider(mech1, mech2, NULL, &prov_mt1,
		    &prov_mt2, &error, areq->an_tried_plist, fg, fg,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
	}

	if (new_pd == NULL)
		return (error);

	/*
	 * We reuse the old context by resetting provider specific
	 * fields in it.
	 */
	if ((ictx = areq->an_context) != NULL) {
		crypto_ctx_t *ctx;

		ASSERT(old_pd == ictx->kc_prov_desc);
		KCF_PROV_REFRELE(ictx->kc_prov_desc);
		KCF_PROV_REFHOLD(new_pd);
		ictx->kc_prov_desc = new_pd;

		ctx = &ictx->kc_glbl_ctx;
		ctx->cc_provider = new_pd->pd_prov_handle;
		ctx->cc_session = new_pd->pd_sid;
		ctx->cc_provider_private = NULL;
	}

	/* We reuse areq by resetting the provider and context fields. */
	KCF_PROV_REFRELE(old_pd);
	KCF_PROV_REFHOLD(new_pd);
	areq->an_provider = new_pd;
	mutex_enter(&areq->an_lock);
	areq->an_state = REQ_WAITING;
	mutex_exit(&areq->an_lock);

	switch (new_pd->pd_prov_type) {
	case CRYPTO_SW_PROVIDER:
		error = kcf_disp_sw_request(areq);
		break;

	case CRYPTO_HW_PROVIDER: {
		taskq_t *taskq = new_pd->pd_sched_info.ks_taskq;

		if (taskq_dispatch(taskq, process_req_hwp, areq, TQ_NOSLEEP) ==
		    (taskqid_t)0) {
			error = CRYPTO_HOST_MEMORY;
		} else {
			error = CRYPTO_QUEUED;
		}

		break;
	}
	}

	return (error);
}

#define	EMPTY_TASKQ(tq)	((tq)->tq_task.tqent_next == &(tq)->tq_task)

/*
 * Routine called by both ioctl and k-api. The consumer should
 * bundle the parameters into a kcf_req_params_t structure. A bunch
 * of macros are available in ops_impl.h for this bundling. They are:
 *
 *	KCF_WRAP_DIGEST_OPS_PARAMS()
 *	KCF_WRAP_MAC_OPS_PARAMS()
 *	KCF_WRAP_ENCRYPT_OPS_PARAMS()
 *	KCF_WRAP_DECRYPT_OPS_PARAMS() ... etc.
 *
 * It is the caller's responsibility to free the ctx argument when
 * appropriate. See the KCF_CONTEXT_COND_RELEASE macro for details.
 */
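/*
 * Illustrative sketch (not part of the original source): a k-api
 * routine might bundle an atomic digest request roughly as below. The
 * exact KCF_WRAP_DIGEST_OPS_PARAMS() argument list is defined in
 * ops_impl.h; the (mech, key, data, digest) ordering shown here is an
 * assumption for illustration only.
 *
 *	kcf_req_params_t params;
 *
 *	KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC, pd->pd_sid,
 *	    mech, NULL, data, digest);
 *	error = kcf_submit_request(pd, NULL, crq, &params, B_FALSE);
 */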
int
kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
    crypto_call_req_t *crq, kcf_req_params_t *params, boolean_t cont)
{
	int error = CRYPTO_SUCCESS;
	kcf_areq_node_t *areq;
	kcf_sreq_node_t *sreq;
	kcf_context_t *kcf_ctx;
	taskq_t *taskq = pd->pd_sched_info.ks_taskq;

	kcf_ctx = ctx ? (kcf_context_t *)ctx->cc_framework_private : NULL;

	/* Synchronous cases */
	if (crq == NULL) {
		switch (pd->pd_prov_type) {
		case CRYPTO_SW_PROVIDER:
			error = common_submit_request(pd, ctx, params,
			    KCF_RHNDL(KM_SLEEP));
			break;

		case CRYPTO_HW_PROVIDER:
			sreq = kmem_cache_alloc(kcf_sreq_cache, KM_SLEEP);
			sreq->sn_state = REQ_ALLOCATED;
			sreq->sn_rv = CRYPTO_FAILED;

			sreq->sn_params = params;
			KCF_PROV_REFHOLD(pd);
			sreq->sn_provider = pd;

			/*
			 * Note that we do not need to hold the context
			 * for the synchronous case as the context will
			 * never become invalid underneath us in this case.
			 */
			sreq->sn_context = kcf_ctx;

			ASSERT(taskq != NULL);
			/*
			 * Call the SPI directly if the taskq is empty and the
			 * provider is not busy, else dispatch to the taskq.
			 * Calling directly is fine as this is the synchronous
			 * case. This is unlike the asynchronous case where we
			 * must always dispatch to the taskq.
			 */
			if (EMPTY_TASKQ(taskq) &&
			    pd->pd_state == KCF_PROV_READY) {
				process_req_hwp(sreq);
			} else {
				/*
				 * We cannot tell from the taskq_dispatch()
				 * return value if we exceeded maxalloc.
				 * Hence the check here. Since we are allowed
				 * to wait in the synchronous case, we wait
				 * for the taskq to become empty.
				 */
				if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
					taskq_wait(taskq);
				}

				if (taskq_dispatch(taskq, process_req_hwp,
				    sreq, TQ_SLEEP) == (taskqid_t)0) {
					error = CRYPTO_HOST_MEMORY;
					KCF_PROV_REFRELE(sreq->sn_provider);
					kmem_cache_free(kcf_sreq_cache, sreq);
					goto done;
				}
			}

			/*
			 * Wait for the notification to arrive,
			 * if the operation is not done yet.
			 * Bug# 4722589 will make the wait a cv_wait_sig().
			 */
			mutex_enter(&sreq->sn_lock);
			while (sreq->sn_state < REQ_DONE)
				cv_wait(&sreq->sn_cv, &sreq->sn_lock);
			mutex_exit(&sreq->sn_lock);

			error = sreq->sn_rv;
			KCF_PROV_REFRELE(sreq->sn_provider);
			kmem_cache_free(kcf_sreq_cache, sreq);

			break;

		default:
			error = CRYPTO_FAILED;
			break;
		}

	} else {	/* Asynchronous cases */
		switch (pd->pd_prov_type) {
		case CRYPTO_SW_PROVIDER:
			if (!(crq->cr_flag & CRYPTO_ALWAYS_QUEUE)) {
				/*
				 * This case has less overhead since there is
				 * no switching of context.
				 */
				error = common_submit_request(pd, ctx, params,
				    KCF_RHNDL(KM_NOSLEEP));
			} else {
				/*
				 * CRYPTO_ALWAYS_QUEUE is set. We need to
				 * queue the request and return.
				 */
				areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
				    params, cont);
				if (areq == NULL)
					error = CRYPTO_HOST_MEMORY;
				else {
					if (!(crq->cr_flag
					    & CRYPTO_SKIP_REQID)) {
						/*
						 * Set the request handle. This
						 * handle is used for any
						 * crypto_cancel_req(9f) calls
						 * from the consumer. We have
						 * to do this before
						 * dispatching the request.
						 */
						crq->cr_reqid =
						    kcf_reqid_insert(areq);
					}

					error = kcf_disp_sw_request(areq);
					/*
					 * If there is an error processing this
					 * request, remove the handle and
					 * release the request structure.
					 */
					if (error != CRYPTO_QUEUED) {
						if (!(crq->cr_flag
						    & CRYPTO_SKIP_REQID))
							kcf_reqid_delete(areq);
						KCF_AREQ_REFRELE(areq);
					}
				}
			}
			break;

		case CRYPTO_HW_PROVIDER:
			/*
			 * We need to queue the request and return.
			 */
			areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params,
			    cont);
			if (areq == NULL) {
				error = CRYPTO_HOST_MEMORY;
				goto done;
			}

			ASSERT(taskq != NULL);
			/*
			 * We cannot tell from the taskq_dispatch() return
			 * value if we exceeded maxalloc. Hence the check
			 * here.
			 */
			if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
				error = CRYPTO_BUSY;
				KCF_AREQ_REFRELE(areq);
				goto done;
			}

			if (!(crq->cr_flag & CRYPTO_SKIP_REQID)) {
				/*
				 * Set the request handle. This handle is used
				 * for any crypto_cancel_req(9f) calls from the
				 * consumer. We have to do this before
				 * dispatching the request.
				 */
				crq->cr_reqid = kcf_reqid_insert(areq);
			}

			if (taskq_dispatch(taskq,
			    process_req_hwp, areq, TQ_NOSLEEP) ==
			    (taskqid_t)0) {
				error = CRYPTO_HOST_MEMORY;
				if (!(crq->cr_flag & CRYPTO_SKIP_REQID))
					kcf_reqid_delete(areq);
				KCF_AREQ_REFRELE(areq);
			} else {
				error = CRYPTO_QUEUED;
			}
			break;

		default:
			error = CRYPTO_FAILED;
			break;
		}
	}

done:
	return (error);
}
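/*
 * Illustrative sketch (not part of the original source): an async
 * consumer drives kcf_submit_request() by filling in a
 * crypto_call_req_t. "my_callback" and "my_state" are hypothetical
 * consumer names.
 *
 *	crypto_call_req_t cr;
 *
 *	cr.cr_flag = CRYPTO_ALWAYS_QUEUE;
 *	cr.cr_callback_func = my_callback;
 *	cr.cr_callback_arg = my_state;
 *	error = kcf_submit_request(pd, ctx, &cr, &params, B_FALSE);
 *
 * On CRYPTO_QUEUED, cr.cr_reqid holds the handle that can later be
 * passed to crypto_cancel_req(9f).
 */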
/*
 * We're done with this framework context, so free it. Note that freeing
 * the framework context (kcf_context) frees the global context
 * (crypto_ctx).
 *
 * The provider is responsible for freeing provider private context after a
 * final or single operation and resetting the cc_provider_private field
 * to NULL. It should do this before it notifies the framework of the
 * completion. We still need to call KCF_PROV_FREE_CONTEXT to handle cases
 * like crypto_cancel_ctx(9f).
 */
void
kcf_free_context(kcf_context_t *kcf_ctx)
{
	kcf_provider_desc_t *pd = kcf_ctx->kc_prov_desc;
	crypto_ctx_t *gctx = &kcf_ctx->kc_glbl_ctx;
	kcf_context_t *kcf_secondctx = kcf_ctx->kc_secondctx;

	/* Release the second context, if any */
	if (kcf_secondctx != NULL)
		KCF_CONTEXT_REFRELE(kcf_secondctx);

	if (gctx->cc_provider_private != NULL) {
		mutex_enter(&pd->pd_lock);
		if (!KCF_IS_PROV_REMOVED(pd)) {
			/*
			 * Increment the provider's internal refcnt so it
			 * doesn't unregister from the framework while
			 * we're calling the entry point.
			 */
			KCF_PROV_IREFHOLD(pd);
			mutex_exit(&pd->pd_lock);
			(void) KCF_PROV_FREE_CONTEXT(pd, gctx);
			KCF_PROV_IREFRELE(pd);
		} else {
			mutex_exit(&pd->pd_lock);
		}
	}

	/* kcf_ctx->kc_prov_desc has a hold on pd */
	KCF_PROV_REFRELE(kcf_ctx->kc_prov_desc);

	/* check if this context is shared with a software provider */
	if ((gctx->cc_flags & CRYPTO_INIT_OPSTATE) &&
	    kcf_ctx->kc_sw_prov_desc != NULL) {
		KCF_PROV_REFRELE(kcf_ctx->kc_sw_prov_desc);
	}

	kmem_cache_free(kcf_context_cache, kcf_ctx);
}

/*
 * Free the request after releasing all the holds.
 */
void
kcf_free_req(kcf_areq_node_t *areq)
{
	KCF_PROV_REFRELE(areq->an_provider);
	if (areq->an_context != NULL)
		KCF_CONTEXT_REFRELE(areq->an_context);

	if (areq->an_tried_plist != NULL)
		kcf_free_triedlist(areq->an_tried_plist);
	kmem_cache_free(kcf_areq_cache, areq);
}

/*
 * Utility routine to remove a request from the chain of requests
 * hanging off a context.
 */
void
kcf_removereq_in_ctxchain(kcf_context_t *ictx, kcf_areq_node_t *areq)
{
	kcf_areq_node_t *cur, *prev;

	/*
	 * Get the context lock, search for areq in the chain and remove it.
	 */
	ASSERT(ictx != NULL);
	mutex_enter(&ictx->kc_in_use_lock);
	prev = cur = ictx->kc_req_chain_first;

	while (cur != NULL) {
		if (cur == areq) {
			if (prev == cur) {
				if ((ictx->kc_req_chain_first =
				    cur->an_ctxchain_next) == NULL)
					ictx->kc_req_chain_last = NULL;
			} else {
				if (cur == ictx->kc_req_chain_last)
					ictx->kc_req_chain_last = prev;
				prev->an_ctxchain_next = cur->an_ctxchain_next;
			}

			break;
		}
		prev = cur;
		cur = cur->an_ctxchain_next;
	}
	mutex_exit(&ictx->kc_in_use_lock);
}
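/*
 * Illustrative layout (not in the original source) of the per-context
 * request chain manipulated above and in kcf_areqnode_alloc():
 *
 *	ictx->kc_req_chain_first -> areq1 -> areq2 -> areq3 -> NULL
 *	ictx->kc_req_chain_last  -------------------> areq3
 *
 * Links are an_ctxchain_next pointers; only the head request has
 * an_is_my_turn set, so requests on a context complete in FIFO order.
 */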
/*
 * Remove the specified node from the global software queue.
 *
 * The caller must hold the queue lock and request lock (an_lock).
 */
void
kcf_remove_node(kcf_areq_node_t *node)
{
	kcf_areq_node_t *nextp = node->an_next;
	kcf_areq_node_t *prevp = node->an_prev;

	ASSERT(mutex_owned(&gswq->gs_lock));

	if (nextp != NULL)
		nextp->an_prev = prevp;
	else
		gswq->gs_last = prevp;

	if (prevp != NULL)
		prevp->an_next = nextp;
	else
		gswq->gs_first = nextp;

	ASSERT(mutex_owned(&node->an_lock));
	node->an_state = REQ_CANCELED;
}

/*
 * Remove and return the first node in the global software queue.
 *
 * The caller must hold the queue lock.
 */
static kcf_areq_node_t *
kcf_dequeue()
{
	kcf_areq_node_t *tnode = NULL;

	ASSERT(mutex_owned(&gswq->gs_lock));
	if ((tnode = gswq->gs_first) == NULL) {
		return (NULL);
	} else {
		ASSERT(gswq->gs_first->an_prev == NULL);
		gswq->gs_first = tnode->an_next;
		if (tnode->an_next == NULL)
			gswq->gs_last = NULL;
		else
			tnode->an_next->an_prev = NULL;
	}

	gswq->gs_njobs--;
	return (tnode);
}

/*
 * Add the request node to the end of the global software queue.
 *
 * The caller should not hold the queue lock. Returns 0 if the
 * request is successfully queued. Returns CRYPTO_BUSY if the limit
 * on the number of jobs is exceeded.
 */
static int
kcf_enqueue(kcf_areq_node_t *node)
{
	kcf_areq_node_t *tnode;

	mutex_enter(&gswq->gs_lock);

	if (gswq->gs_njobs >= gswq->gs_maxjobs) {
		mutex_exit(&gswq->gs_lock);
		return (CRYPTO_BUSY);
	}

	if (gswq->gs_last == NULL) {
		gswq->gs_first = gswq->gs_last = node;
	} else {
		ASSERT(gswq->gs_last->an_next == NULL);
		tnode = gswq->gs_last;
		tnode->an_next = node;
		gswq->gs_last = node;
		node->an_prev = tnode;
	}

	gswq->gs_njobs++;

	/* an_lock not needed here as we hold gs_lock */
	node->an_state = REQ_WAITING;

	mutex_exit(&gswq->gs_lock);

	return (0);
}

/*
 * Decrement the thread pool count and signal the failover
 * thread if we are the last one out.
 */
static void
kcf_decrcnt_andsignal()
{
	KCF_ATOMIC_DECR(kcfpool->kp_threads);

	mutex_enter(&kcfpool->kp_thread_lock);
	if (kcfpool->kp_threads == 0)
		cv_signal(&kcfpool->kp_nothr_cv);
	mutex_exit(&kcfpool->kp_thread_lock);
}

/*
 * Function run by a thread from kcfpool to work on the global software
 * queue. It is called from ioctl(CRYPTO_POOL_RUN, ...).
 */
int
kcf_svc_do_run(void)
{
	int error = 0;
	clock_t rv;
	clock_t timeout_val;
	kcf_areq_node_t *req;
	kcf_context_t *ictx;
	kcf_provider_desc_t *pd;

	KCF_ATOMIC_INCR(kcfpool->kp_threads);

	for (;;) {
		mutex_enter(&gswq->gs_lock);

		while ((req = kcf_dequeue()) == NULL) {
			timeout_val = ddi_get_lbolt() +
			    drv_usectohz(kcf_idlethr_timeout);

			KCF_ATOMIC_INCR(kcfpool->kp_idlethreads);
			rv = cv_timedwait_sig(&gswq->gs_cv, &gswq->gs_lock,
			    timeout_val);
			KCF_ATOMIC_DECR(kcfpool->kp_idlethreads);

			switch (rv) {
			case 0:
				/*
				 * A signal (as in kill(2)) is pending. We did
				 * not get any cv_signal().
				 */
				kcf_decrcnt_andsignal();
				mutex_exit(&gswq->gs_lock);
				return (EINTR);

			case -1:
				/*
				 * Timed out and we are not signaled. Let us
				 * see if this thread should exit. We should
				 * keep at least kcf_minthreads.
				 */
				if (kcfpool->kp_threads > kcf_minthreads) {
					kcf_decrcnt_andsignal();
					mutex_exit(&gswq->gs_lock);
					return (0);
				}

				/* Resume the wait for work */
				break;

			default:
				/*
				 * We are signaled to work on the queue.
				 */
				break;
			}
		}

		mutex_exit(&gswq->gs_lock);

		ictx = req->an_context;
		if (ictx == NULL) {	/* Context-less operation */
			pd = req->an_provider;
			error = common_submit_request(pd, NULL,
			    &req->an_params, req);
			kcf_aop_done(req, error);
			continue;
		}

		/*
		 * We check if we can work on the request now.
		 * Solaris does not guarantee any order on how the threads
		 * are scheduled or how the waiters on a mutex are chosen.
		 * So, we need to maintain our own order.
		 *
		 * is_my_turn is set to B_TRUE initially for a request when
		 * it is enqueued and there are no other requests
		 * for that context. Note that a thread sleeping on
		 * kc_in_use_cv is not counted as an idle thread. This is
		 * because we define an idle thread as one that sleeps on the
		 * global queue waiting for new requests.
		 */
		mutex_enter(&ictx->kc_in_use_lock);
		while (req->an_is_my_turn == B_FALSE) {
			ictx->kc_need_signal = B_TRUE;
			KCF_ATOMIC_INCR(kcfpool->kp_blockedthreads);
			cv_wait(&ictx->kc_in_use_cv, &ictx->kc_in_use_lock);
			KCF_ATOMIC_DECR(kcfpool->kp_blockedthreads);
		}
		mutex_exit(&ictx->kc_in_use_lock);

		mutex_enter(&req->an_lock);
		req->an_state = REQ_INPROGRESS;
		mutex_exit(&req->an_lock);

		pd = ictx->kc_prov_desc;
		ASSERT(pd == req->an_provider);
		error = common_submit_request(pd, &ictx->kc_glbl_ctx,
		    &req->an_params, req);

		kcf_aop_done(req, error);
	}
}
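/*
 * Illustrative timeline (not in the original source) of the per-context
 * ordering handshake above: a worker that dequeues a request whose
 * an_is_my_turn is B_FALSE sets kc_need_signal and blocks on
 * kc_in_use_cv. When the request ahead of it completes, kcf_aop_done()
 * marks the next request with an_is_my_turn = B_TRUE and does a
 * cv_broadcast() on kc_in_use_cv, letting the blocked worker for this
 * context proceed.
 */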
/*
 * kmem_cache_alloc constructor for sync request structure.
 */
/* ARGSUSED */
static int
kcf_sreq_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;

	sreq->sn_type = CRYPTO_SYNCH;
	cv_init(&sreq->sn_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&sreq->sn_lock, NULL, MUTEX_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
kcf_sreq_cache_destructor(void *buf, void *cdrarg)
{
	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;

	mutex_destroy(&sreq->sn_lock);
	cv_destroy(&sreq->sn_cv);
}

/*
 * kmem_cache_alloc constructor for async request structure.
 */
/* ARGSUSED */
static int
kcf_areq_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;

	areq->an_type = CRYPTO_ASYNCH;
	mutex_init(&areq->an_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&areq->an_done, NULL, CV_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
kcf_areq_cache_destructor(void *buf, void *cdrarg)
{
	kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;

	ASSERT(areq->an_refcnt == 0);
	mutex_destroy(&areq->an_lock);
	cv_destroy(&areq->an_done);
}

/*
 * kmem_cache_alloc constructor for kcf_context structure.
 */
/* ARGSUSED */
static int
kcf_context_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	kcf_context_t *kctx = (kcf_context_t *)buf;

	mutex_init(&kctx->kc_in_use_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&kctx->kc_in_use_cv, NULL, CV_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
kcf_context_cache_destructor(void *buf, void *cdrarg)
{
	kcf_context_t *kctx = (kcf_context_t *)buf;

	ASSERT(kctx->kc_refcnt == 0);
	mutex_destroy(&kctx->kc_in_use_lock);
	cv_destroy(&kctx->kc_in_use_cv);
}

/*
 * Creates and initializes all the structures needed by the framework.
 */
void
kcf_sched_init(void)
{
	int i;
	kcf_reqid_table_t *rt;

	/*
	 * Create all the kmem caches needed by the framework. We set the
	 * align argument to 64, to get a slab aligned to a 64-byte boundary
	 * as well as have the objects (cache_chunksize) be a 64-byte
	 * multiple. This helps to avoid false sharing as this is the size
	 * of the CPU cache line.
	 */
	kcf_sreq_cache = kmem_cache_create("kcf_sreq_cache",
	    sizeof (struct kcf_sreq_node), 64, kcf_sreq_cache_constructor,
	    kcf_sreq_cache_destructor, NULL, NULL, NULL, 0);

	kcf_areq_cache = kmem_cache_create("kcf_areq_cache",
	    sizeof (struct kcf_areq_node), 64, kcf_areq_cache_constructor,
	    kcf_areq_cache_destructor, NULL, NULL, NULL, 0);

	kcf_context_cache = kmem_cache_create("kcf_context_cache",
	    sizeof (struct kcf_context), 64, kcf_context_cache_constructor,
	    kcf_context_cache_destructor, NULL, NULL, NULL, 0);

	mutex_init(&kcf_dh_lock, NULL, MUTEX_DEFAULT, NULL);

	gswq = kmem_alloc(sizeof (kcf_global_swq_t), KM_SLEEP);

	mutex_init(&gswq->gs_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&gswq->gs_cv, NULL, CV_DEFAULT, NULL);
	gswq->gs_njobs = 0;
	compute_min_max_threads();	/* Computes gs_maxjobs also. */
	gswq->gs_first = gswq->gs_last = NULL;

	/* Initialize the global reqid table */
	for (i = 0; i < REQID_TABLES; i++) {
		rt = kmem_zalloc(sizeof (kcf_reqid_table_t), KM_SLEEP);
		kcf_reqid_table[i] = rt;
		mutex_init(&rt->rt_lock, NULL, MUTEX_DEFAULT, NULL);
		rt->rt_curid = i;
	}

	/* Allocate and initialize the thread pool */
	kcfpool_alloc();

	/* Initialize the event notification list variables */
	mutex_init(&ntfy_list_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ntfy_list_cv, NULL, CV_DEFAULT, NULL);

	/* Initialize the crypto_bufcall list variables */
	mutex_init(&cbuf_list_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&cbuf_list_cv, NULL, CV_DEFAULT, NULL);

	/* Create the kcf kstat */
	kcf_misc_kstat = kstat_create("kcf", 0, "framework_stats", "crypto",
	    KSTAT_TYPE_NAMED, sizeof (kcf_stats_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (kcf_misc_kstat != NULL) {
		kcf_misc_kstat->ks_data = &kcf_ksdata;
		kcf_misc_kstat->ks_update = kcf_misc_kstat_update;
		kstat_install(kcf_misc_kstat);
	}
}
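/*
 * Illustrative note (not in the original source): the kstat created
 * above can be inspected from userland, e.g.:
 *
 *	$ kstat -m kcf -n framework_stats
 *
 * which reports the named values filled in by kcf_misc_kstat_update().
 */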
/*
 * This routine should only be called by drv/cryptoadm.
 *
 * The kcf_sched_running flag isn't protected by a lock. But we are safe
 * because the first thread ("cryptoadm refresh") calling this routine
 * during boot completes before any other thread that can call it.
 */
void
kcf_sched_start(void)
{
	if (kcf_sched_running)
		return;

	/* Start the failover kernel thread for now */
	(void) thread_create(NULL, 0, &kcf_failover_thread, 0, 0, &p0,
	    TS_RUN, minclsyspri);

	/* Start the background processing thread. */
	(void) thread_create(NULL, 0, &crypto_bufcall_service, 0, 0, &p0,
	    TS_RUN, minclsyspri);

	kcf_sched_running = B_TRUE;
}

/*
 * Signal the waiting sync client.
 */
void
kcf_sop_done(kcf_sreq_node_t *sreq, int error)
{
	mutex_enter(&sreq->sn_lock);
	sreq->sn_state = REQ_DONE;
	sreq->sn_rv = error;
	cv_signal(&sreq->sn_cv);
	mutex_exit(&sreq->sn_lock);
}
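/*
 * Illustrative note (not in the original source): kcf_sop_done() is the
 * producer half of the synchronous hand-off in kcf_submit_request(),
 * which waits as:
 *
 *	mutex_enter(&sreq->sn_lock);
 *	while (sreq->sn_state < REQ_DONE)
 *		cv_wait(&sreq->sn_cv, &sreq->sn_lock);
 *	mutex_exit(&sreq->sn_lock);
 *
 * Setting sn_state under sn_lock before the cv_signal() ensures the
 * waiter cannot miss the wakeup.
 */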
/*
 * Callback the async client with the operation status.
 * We free the async request node and possibly the context.
 * We also handle any chain of requests hanging off
 * the context.
 */
void
kcf_aop_done(kcf_areq_node_t *areq, int error)
{
	kcf_op_type_t optype;
	boolean_t skip_notify = B_FALSE;
	kcf_context_t *ictx;
	kcf_areq_node_t *nextreq;

	/*
	 * Handle recoverable errors. This has to be done first
	 * before doing anything else in this routine so that
	 * we do not change the state of the request.
	 */
	if (error != CRYPTO_SUCCESS && IS_RECOVERABLE(error)) {
		/*
		 * We try another provider, if one is available. Else
		 * we continue with the failure notification to the
		 * client.
		 */
		if (kcf_resubmit_request(areq) == CRYPTO_QUEUED)
			return;
	}

	mutex_enter(&areq->an_lock);
	areq->an_state = REQ_DONE;
	mutex_exit(&areq->an_lock);

	optype = (&areq->an_params)->rp_optype;
	if ((ictx = areq->an_context) != NULL) {
		/*
		 * After a request is removed from the request queue,
		 * it still stays on a chain of requests hanging off
		 * its context structure. It needs to be removed
		 * from this chain at this point.
		 */
		mutex_enter(&ictx->kc_in_use_lock);
		nextreq = areq->an_ctxchain_next;
		ASSERT(nextreq != NULL || ictx->kc_need_signal == B_FALSE);

		if (nextreq != NULL) {
			nextreq->an_is_my_turn = B_TRUE;
			/*
			 * Currently, the following case happens
			 * only for software providers.
			 */
			if (ictx->kc_need_signal) {
				cv_broadcast(&ictx->kc_in_use_cv);
				ictx->kc_need_signal = B_FALSE;
			}
		}

		ictx->kc_req_chain_first = nextreq;
		if (nextreq == NULL)
			ictx->kc_req_chain_last = NULL;
		mutex_exit(&ictx->kc_in_use_lock);

		if (IS_SINGLE_OP(optype) || IS_FINAL_OP(optype)) {
			ASSERT(nextreq == NULL);
			KCF_CONTEXT_REFRELE(ictx);
		} else if (error != CRYPTO_SUCCESS && IS_INIT_OP(optype)) {
			/*
			 * NOTE - We do not release the context in case of
			 * update operations. We require the consumer to free
			 * it explicitly, in case it wants to abandon an
			 * update operation. This is done as there may be
			 * mechanisms in ECB mode that can continue even if
			 * an operation on a block fails.
			 */
			KCF_CONTEXT_REFRELE(ictx);
		}
	}

	/* Deal with the internal continuation to this request first */
	if (areq->an_isdual) {
		kcf_dual_req_t *next_arg;

		next_arg = (kcf_dual_req_t *)areq->an_reqarg.cr_callback_arg;
		next_arg->kr_areq = areq;
		KCF_AREQ_REFHOLD(areq);
		areq->an_isdual = B_FALSE;

		NOTIFY_CLIENT(areq, error);
		return;
	}

	/*
	 * If the CRYPTO_NOTIFY_OPDONE flag is set, we should always notify.
	 * If this flag is clear, we skip the notification provided there
	 * are no errors. We check this flag only for init or update
	 * operations. It is ignored for single, final or atomic operations.
	 */
	skip_notify = (IS_UPDATE_OP(optype) || IS_INIT_OP(optype)) &&
	    (!(areq->an_reqarg.cr_flag & CRYPTO_NOTIFY_OPDONE)) &&
	    (error == CRYPTO_SUCCESS);

	if (!skip_notify) {
		NOTIFY_CLIENT(areq, error);
	}

	if (!(areq->an_reqarg.cr_flag & CRYPTO_SKIP_REQID))
		kcf_reqid_delete(areq);

	KCF_AREQ_REFRELE(areq);
}
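/*
 * Illustrative decision table (not in the original source) for the
 * skip_notify computation above:
 *
 *	optype			NOTIFY_OPDONE	error		notified?
 *	init/update		clear		CRYPTO_SUCCESS	no
 *	init/update		clear		failure		yes
 *	init/update		set		any		yes
 *	single/final/atomic	don't care	any		yes
 */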
/*
 * Allocate the thread pool and initialize all the fields.
 */
static void
kcfpool_alloc()
{
	kcfpool = kmem_alloc(sizeof (kcf_pool_t), KM_SLEEP);

	kcfpool->kp_threads = kcfpool->kp_idlethreads = 0;
	kcfpool->kp_blockedthreads = 0;
	kcfpool->kp_signal_create_thread = B_FALSE;
	kcfpool->kp_nthrs = 0;
	kcfpool->kp_user_waiting = B_FALSE;

	mutex_init(&kcfpool->kp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&kcfpool->kp_nothr_cv, NULL, CV_DEFAULT, NULL);

	mutex_init(&kcfpool->kp_user_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&kcfpool->kp_user_cv, NULL, CV_DEFAULT, NULL);

	kcf_idlethr_timeout = KCF_DEFAULT_THRTIMEOUT;
}

/*
 * This function is run by the 'creator' thread in the pool.
 * It is called from ioctl(CRYPTO_POOL_WAIT, ...).
 */
int
kcf_svc_wait(int *nthrs)
{
	clock_t rv;
	clock_t timeout_val;

	if (kcfpool == NULL)
		return (ENOENT);

	mutex_enter(&kcfpool->kp_user_lock);
	/* Check if there's already a user thread waiting on this kcfpool */
	if (kcfpool->kp_user_waiting) {
		mutex_exit(&kcfpool->kp_user_lock);
		*nthrs = 0;
		return (EBUSY);
	}

	kcfpool->kp_user_waiting = B_TRUE;

	/* Go to sleep, waiting for the signaled flag. */
	while (!kcfpool->kp_signal_create_thread) {
		timeout_val = ddi_get_lbolt() +
		    drv_usectohz(kcf_idlethr_timeout);

		rv = cv_timedwait_sig(&kcfpool->kp_user_cv,
		    &kcfpool->kp_user_lock, timeout_val);
		switch (rv) {
		case 0:
			/* Interrupted, return to handle exit or signal */
			kcfpool->kp_user_waiting = B_FALSE;
			kcfpool->kp_signal_create_thread = B_FALSE;
			mutex_exit(&kcfpool->kp_user_lock);
			/*
			 * kcfd is exiting. Release the door and
			 * invalidate it.
			 */
			mutex_enter(&kcf_dh_lock);
			if (kcf_dh != NULL) {
				door_ki_rele(kcf_dh);
				kcf_dh = NULL;
			}
			mutex_exit(&kcf_dh_lock);
			return (EINTR);

		case -1:
			/* Timed out. Recalculate the min/max threads */
			compute_min_max_threads();
			break;

		default:
			/* Worker thread did a cv_signal() */
			break;
		}
	}

	kcfpool->kp_signal_create_thread = B_FALSE;
	kcfpool->kp_user_waiting = B_FALSE;

	*nthrs = kcfpool->kp_nthrs;
	mutex_exit(&kcfpool->kp_user_lock);

	/* Return to userland for possible thread creation. */
	return (0);
}


/*
 * This routine introduces a locking order for gswq->gs_lock followed
 * by cpu_lock.
 * This means that no consumer of the k-api should hold cpu_lock when calling
 * k-api routines.
 */
static void
compute_min_max_threads()
{
	psetid_t psid = PS_MYID;

	mutex_enter(&gswq->gs_lock);
	if (cpupart_get_cpus(&psid, NULL, (uint_t *)&kcf_minthreads) != 0) {
		cmn_err(CE_WARN, "kcf:compute_min_max_threads cpupart_get_cpus:"
		    " failed, setting kcf_minthreads to 1");
		kcf_minthreads = 1;
	}
	kcf_maxthreads = kcf_thr_multiple * kcf_minthreads;
	gswq->gs_maxjobs = kcf_maxthreads * crypto_taskq_maxalloc;
	mutex_exit(&gswq->gs_lock);
}
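/*
 * Worked example (illustrative, not in the original source): on a
 * 4-CPU processor set, cpupart_get_cpus() yields kcf_minthreads = 4,
 * so with the default kcf_thr_multiple of 2, kcf_maxthreads = 8 and
 * gs_maxjobs = 8 * crypto_taskq_maxalloc.
 */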
/*
 * This is the main routine of the failover kernel thread.
 * If there are any threads in the pool we sleep. The last thread in the
 * pool to exit will signal us to get to work. We get back to sleep
 * once we detect that the pool has threads.
 *
 * Note that in the hand-off from us to a pool thread we get to run once.
 * Since this hand-off is a rare event this should be fine.
 */
static void
kcf_failover_thread()
{
	int error = 0;
	kcf_context_t *ictx;
	kcf_areq_node_t *req;
	callb_cpr_t cpr_info;
	kmutex_t cpr_lock;
	static boolean_t is_logged = B_FALSE;

	mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
	CALLB_CPR_INIT(&cpr_info, &cpr_lock, callb_generic_cpr,
	    "kcf_failover_thread");

	for (;;) {
		/*
		 * Wait if there are any threads in the pool.
		 */
		if (kcfpool->kp_threads > 0) {
			mutex_enter(&cpr_lock);
			CALLB_CPR_SAFE_BEGIN(&cpr_info);
			mutex_exit(&cpr_lock);

			mutex_enter(&kcfpool->kp_thread_lock);
			cv_wait(&kcfpool->kp_nothr_cv,
			    &kcfpool->kp_thread_lock);
			mutex_exit(&kcfpool->kp_thread_lock);

			mutex_enter(&cpr_lock);
			CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
			mutex_exit(&cpr_lock);
			is_logged = B_FALSE;
		}

		/*
		 * Get the requests from the queue and wait if needed.
		 */
		mutex_enter(&gswq->gs_lock);

		while ((req = kcf_dequeue()) == NULL) {
			mutex_enter(&cpr_lock);
			CALLB_CPR_SAFE_BEGIN(&cpr_info);
			mutex_exit(&cpr_lock);

			KCF_ATOMIC_INCR(kcfpool->kp_idlethreads);
			cv_wait(&gswq->gs_cv, &gswq->gs_lock);
			KCF_ATOMIC_DECR(kcfpool->kp_idlethreads);

			mutex_enter(&cpr_lock);
			CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
			mutex_exit(&cpr_lock);
		}

		mutex_exit(&gswq->gs_lock);

		/*
		 * We check kp_threads since kcfd could have started
		 * while we were waiting on the global software queue.
		 */
		if (kcfpool->kp_threads <= 0 && !is_logged) {
			cmn_err(CE_WARN, "kcfd is not running. Please check "
			    "and restart kcfd. Using the failover kernel "
			    "thread for now.\n");
			is_logged = B_TRUE;
		}

		/*
		 * Get to work on the request.
		 */
		ictx = req->an_context;
		mutex_enter(&req->an_lock);
		req->an_state = REQ_INPROGRESS;
		mutex_exit(&req->an_lock);

		error = common_submit_request(req->an_provider, ictx ?
		    &ictx->kc_glbl_ctx : NULL, &req->an_params, req);

		kcf_aop_done(req, error);
	}
}

/*
 * Insert the async request in the hash table after assigning it
 * an ID. Returns the ID.
 *
 * The ID is used by the caller to pass as an argument to a
 * cancel_req() routine later.
 */
static crypto_req_id_t
kcf_reqid_insert(kcf_areq_node_t *areq)
{
	int indx;
	crypto_req_id_t id;
	kcf_areq_node_t *headp;
	kcf_reqid_table_t *rt =
	    kcf_reqid_table[CPU->cpu_seqid & REQID_TABLE_MASK];

	mutex_enter(&rt->rt_lock);

	rt->rt_curid = id =
	    (rt->rt_curid - REQID_COUNTER_LOW) | REQID_COUNTER_HIGH;
	SET_REQID(areq, id);
	indx = REQID_HASH(id);
	headp = areq->an_idnext = rt->rt_idhash[indx];
	areq->an_idprev = NULL;
	if (headp != NULL)
		headp->an_idprev = areq;

	rt->rt_idhash[indx] = areq;
	mutex_exit(&rt->rt_lock);

	return (id);
}

/*
 * Delete the async request from the hash table.
 */
static void
kcf_reqid_delete(kcf_areq_node_t *areq)
{
	int indx;
	kcf_areq_node_t *nextp, *prevp;
	crypto_req_id_t id = GET_REQID(areq);
	kcf_reqid_table_t *rt;

	rt = kcf_reqid_table[id & REQID_TABLE_MASK];
	indx = REQID_HASH(id);

	mutex_enter(&rt->rt_lock);

	nextp = areq->an_idnext;
	prevp = areq->an_idprev;
	if (nextp != NULL)
		nextp->an_idprev = prevp;
	if (prevp != NULL)
		prevp->an_idnext = nextp;
	else
		rt->rt_idhash[indx] = nextp;

	SET_REQID(areq, 0);
	cv_broadcast(&areq->an_done);

	mutex_exit(&rt->rt_lock);
}
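/*
 * Illustrative note (not in the original source): a request ID as
 * assigned above packs two things - the low bits preserve the table
 * index seeded into rt_curid by kcf_sched_init() (which is why
 * kcf_reqid_delete() and crypto_cancel_req() can recover the table
 * with id & REQID_TABLE_MASK), while the counter portion changes by
 * REQID_COUNTER_LOW on every insertion and REQID_COUNTER_HIGH is
 * always set, so a valid ID is never zero. SET_REQID(areq, 0)
 * therefore marks a request as having no ID.
 */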
/*
 * Cancel a single asynchronous request.
 *
 * We guarantee that no problems will result from calling
 * crypto_cancel_req() for a request which is either running, or
 * has already completed. We remove the request from any queues
 * if it is possible. We wait for request completion if the
 * request is dispatched to a provider.
 *
 * Calling context:
 *	Can be called from user context only.
 *
 * NOTE: We acquire the following locks in this routine (in order):
 *	- rt_lock (kcf_reqid_table_t)
 *	- gswq->gs_lock
 *	- areq->an_lock
 *	- ictx->kc_in_use_lock (from kcf_removereq_in_ctxchain())
 *
 * This locking order MUST be maintained in code everywhere else.
 */
void
crypto_cancel_req(crypto_req_id_t id)
{
	int indx;
	kcf_areq_node_t *areq;
	kcf_provider_desc_t *pd;
	kcf_context_t *ictx;
	kcf_reqid_table_t *rt;

	rt = kcf_reqid_table[id & REQID_TABLE_MASK];
	indx = REQID_HASH(id);

	mutex_enter(&rt->rt_lock);
	for (areq = rt->rt_idhash[indx]; areq; areq = areq->an_idnext) {
		if (GET_REQID(areq) == id) {
			/*
			 * We found the request. It is either still waiting
			 * in the framework queues or running at the provider.
			 */
			pd = areq->an_provider;
			ASSERT(pd != NULL);

			switch (pd->pd_prov_type) {
			case CRYPTO_SW_PROVIDER:
				mutex_enter(&gswq->gs_lock);
				mutex_enter(&areq->an_lock);

				/* This request can be safely canceled. */
				if (areq->an_state <= REQ_WAITING) {
					/*
					 * Remove the request from gswq, the
					 * global software queue.
					 */
					kcf_remove_node(areq);
					if ((ictx = areq->an_context) != NULL)
						kcf_removereq_in_ctxchain(
						    ictx, areq);

					mutex_exit(&areq->an_lock);
					mutex_exit(&gswq->gs_lock);
					mutex_exit(&rt->rt_lock);

					/*
					 * Remove areq from the hash table
					 * and free it.
					 */
					kcf_reqid_delete(areq);
					KCF_AREQ_REFRELE(areq);
					return;
				}

				mutex_exit(&areq->an_lock);
				mutex_exit(&gswq->gs_lock);
				break;

			case CRYPTO_HW_PROVIDER:
				/*
				 * There is no interface to remove an entry
				 * once it is on the taskq. So, we do not do
				 * anything for a hardware provider.
				 */
				break;
			}

			/*
			 * The request is running. Wait for the request
			 * completion to notify us.
			 */
			KCF_AREQ_REFHOLD(areq);
			while (GET_REQID(areq) == id)
				cv_wait(&areq->an_done, &rt->rt_lock);
			KCF_AREQ_REFRELE(areq);
			break;
		}
	}

	mutex_exit(&rt->rt_lock);
}

/*
 * Cancel all asynchronous requests associated with the
 * passed in crypto context and free it.
 *
 * A client SHOULD NOT call this routine after calling a crypto_*_final
 * routine. This routine is called only during intermediate operations.
 * The client should not use the crypto context after this function returns
 * since we destroy it.
 *
 * Calling context:
 *	Can be called from user context only.
 */
void
crypto_cancel_ctx(crypto_context_t ctx)
{
	kcf_context_t *ictx;
	kcf_areq_node_t *areq;

	if (ctx == NULL)
		return;

	ictx = (kcf_context_t *)((crypto_ctx_t *)ctx)->cc_framework_private;

	mutex_enter(&ictx->kc_in_use_lock);

	/* Walk the chain and cancel each request */
	while ((areq = ictx->kc_req_chain_first) != NULL) {
		/*
		 * We have to drop the lock here as we may have
		 * to wait for request completion. We hold the
		 * request before dropping the lock though, so that it
		 * won't be freed underneath us.
		 */
		KCF_AREQ_REFHOLD(areq);
		mutex_exit(&ictx->kc_in_use_lock);

		crypto_cancel_req(GET_REQID(areq));
		KCF_AREQ_REFRELE(areq);

		mutex_enter(&ictx->kc_in_use_lock);
	}

	mutex_exit(&ictx->kc_in_use_lock);
	KCF_CONTEXT_REFRELE(ictx);
}
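/*
 * Illustrative sketch (not part of the original source): a consumer
 * that submitted an async request without CRYPTO_SKIP_REQID can cancel
 * it with the handle saved in its crypto_call_req_t:
 *
 *	if (error == CRYPTO_QUEUED)
 *		crypto_cancel_req(cr.cr_reqid);
 *
 * For a multi-part operation it would instead call crypto_cancel_ctx()
 * on the crypto_context_t to cancel every chained request and destroy
 * the context in one shot.
 */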
/*
 * Update kstats.
 */
static int
kcf_misc_kstat_update(kstat_t *ksp, int rw)
{
	uint_t tcnt;
	kcf_stats_t *ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ks_data = ksp->ks_data;

	ks_data->ks_thrs_in_pool.value.ui32 = kcfpool->kp_threads;
	/*
	 * The failover thread is counted in kp_idlethreads in
	 * some corner cases. This is done to avoid doing more checks
	 * when submitting a request. We account for those cases below.
	 */
	if ((tcnt = kcfpool->kp_idlethreads) == (kcfpool->kp_threads + 1))
		tcnt--;
	ks_data->ks_idle_thrs.value.ui32 = tcnt;
	ks_data->ks_minthrs.value.ui32 = kcf_minthreads;
	ks_data->ks_maxthrs.value.ui32 = kcf_maxthreads;
	ks_data->ks_swq_njobs.value.ui32 = gswq->gs_njobs;
	ks_data->ks_swq_maxjobs.value.ui32 = gswq->gs_maxjobs;
	ks_data->ks_taskq_minalloc.value.ui32 = crypto_taskq_minalloc;
	ks_data->ks_taskq_maxalloc.value.ui32 = crypto_taskq_maxalloc;

	return (0);
}

/*
 * Allocate and initialize a kcf_dual_req, used for saving the arguments of
 * a dual operation or an atomic operation that has to be internally
 * simulated with multiple single steps.
 * crq determines the memory allocation flags.
 */
kcf_dual_req_t *
kcf_alloc_req(crypto_call_req_t *crq)
{
	kcf_dual_req_t *kcr;

	kcr = kmem_alloc(sizeof (kcf_dual_req_t), KCF_KMFLAG(crq));

	if (kcr == NULL)
		return (NULL);

	/* Copy the whole crypto_call_req struct, as it isn't persistent */
	if (crq != NULL)
		kcr->kr_callreq = *crq;
	else
		bzero(&(kcr->kr_callreq), sizeof (crypto_call_req_t));
	kcr->kr_areq = NULL;
	kcr->kr_saveoffset = 0;
	kcr->kr_savelen = 0;

	return (kcr);
}
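/*
 * Illustrative sketch (not part of the original source): a caller that
 * simulates a dual operation in two single steps would typically do
 *
 *	kcf_dual_req_t *next_req;
 *
 *	next_req = kcf_alloc_req(crq);
 *	if (next_req == NULL)
 *		return (CRYPTO_HOST_MEMORY);
 *	(set up next_req->kr_params for the second step)
 *
 * and then submit the first step with kcf_next_req as the callback and
 * next_req as the callback argument, so that completion of step one
 * schedules step two below.
 */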
/*
 * Callback routine for the next part of a simulated dual operation.
 * Schedules the next step.
 *
 * This routine can be called from interrupt context.
 */
void
kcf_next_req(void *next_req_arg, int status)
{
	kcf_dual_req_t *next_req = (kcf_dual_req_t *)next_req_arg;
	kcf_req_params_t *params = &(next_req->kr_params);
	kcf_areq_node_t *areq = next_req->kr_areq;
	int error = status;
	kcf_provider_desc_t *pd;
	crypto_dual_data_t *ct;

	/* Stop the processing if an error occurred at this step */
	if (error != CRYPTO_SUCCESS) {
out:
		areq->an_reqarg = next_req->kr_callreq;
		KCF_AREQ_REFRELE(areq);
		kmem_free(next_req, sizeof (kcf_dual_req_t));
		areq->an_isdual = B_FALSE;
		kcf_aop_done(areq, error);
		return;
	}

	switch (params->rp_opgrp) {
	case KCF_OG_MAC: {

		/*
		 * The next req is submitted with the same reqid as the
		 * first part. The consumer only got back that reqid, and
		 * should still be able to cancel the operation during its
		 * second step.
		 */
		kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
		crypto_ctx_template_t mac_tmpl;
		kcf_mech_entry_t *me;

		ct = (crypto_dual_data_t *)mops->mo_data;
		mac_tmpl = (crypto_ctx_template_t)mops->mo_templ;

		/* No expected recoverable failures, so no retry list */
		pd = kcf_get_mech_provider(mops->mo_framework_mechtype,
		    &me, &error, NULL, CRYPTO_FG_MAC_ATOMIC,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len2);

		if (pd == NULL) {
			error = CRYPTO_MECH_NOT_SUPPORTED;
			goto out;
		}
		/* Validate the MAC context template here */
		if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
		    (mac_tmpl != NULL)) {
			kcf_ctx_template_t *ctx_mac_tmpl;

			ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;

			if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
				KCF_PROV_REFRELE(pd);
				error = CRYPTO_OLD_CTX_TEMPLATE;
				goto out;
			}
			mops->mo_templ = ctx_mac_tmpl->ct_prov_tmpl;
		}

		break;
	}
	case KCF_OG_DECRYPT: {
		kcf_decrypt_ops_params_t *dcrops =
		    &(params->rp_u.decrypt_params);

		ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
		/* No expected recoverable failures, so no retry list */
		pd = kcf_get_mech_provider(dcrops->dop_framework_mechtype,
		    NULL, &error, NULL, CRYPTO_FG_DECRYPT_ATOMIC,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len1);

		if (pd == NULL) {
			error = CRYPTO_MECH_NOT_SUPPORTED;
			goto out;
		}
		break;
	}
	}

	/* The second step uses len2 and offset2 of the dual_data */
	next_req->kr_saveoffset = ct->dd_offset1;
	next_req->kr_savelen = ct->dd_len1;
	ct->dd_offset1 = ct->dd_offset2;
	ct->dd_len1 = ct->dd_len2;

	/* preserve if the caller is restricted */
	if (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED) {
		areq->an_reqarg.cr_flag = CRYPTO_RESTRICTED;
	} else {
		areq->an_reqarg.cr_flag = 0;
	}

	areq->an_reqarg.cr_callback_func = kcf_last_req;
	areq->an_reqarg.cr_callback_arg = next_req;
	areq->an_isdual = B_TRUE;

	/*
	 * We would like to call kcf_submit_request() here. But, that is
	 * not possible as that routine allocates a new kcf_areq_node_t
	 * request structure, while we need to reuse the existing request
	 * structure.
	 */
	switch (pd->pd_prov_type) {
	case CRYPTO_SW_PROVIDER:
		error = common_submit_request(pd, NULL, params,
		    KCF_RHNDL(KM_NOSLEEP));
		break;

	case CRYPTO_HW_PROVIDER: {
		kcf_provider_desc_t *old_pd;
		taskq_t *taskq = pd->pd_sched_info.ks_taskq;

		/*
		 * Set the params for the second step in the
		 * dual-ops.
		 */
		areq->an_params = *params;
		old_pd = areq->an_provider;
		KCF_PROV_REFRELE(old_pd);
		KCF_PROV_REFHOLD(pd);
		areq->an_provider = pd;

		/*
		 * Note that we have to do a taskq_dispatch()
		 * here as we may be in interrupt context.
		 */
		if (taskq_dispatch(taskq, process_req_hwp, areq,
		    TQ_NOSLEEP) == (taskqid_t)0) {
			error = CRYPTO_HOST_MEMORY;
		} else {
			error = CRYPTO_QUEUED;
		}
		break;
	}
	}

	/*
	 * We have to release the holds on the request and the provider
	 * in all cases.
	 */
	KCF_AREQ_REFRELE(areq);
	KCF_PROV_REFRELE(pd);

	if (error != CRYPTO_QUEUED) {
		/* restore, clean up, and invoke the client's callback */
		ct->dd_offset1 = next_req->kr_saveoffset;
		ct->dd_len1 = next_req->kr_savelen;
		areq->an_reqarg = next_req->kr_callreq;
		kmem_free(next_req, sizeof (kcf_dual_req_t));
		areq->an_isdual = B_FALSE;
		kcf_aop_done(areq, error);
	}
}

/*
 * Last part of an emulated dual operation.
 * Cleans up and restores the caller's state.
 */
void
kcf_last_req(void *last_req_arg, int status)
{
	kcf_dual_req_t *last_req = (kcf_dual_req_t *)last_req_arg;
	kcf_req_params_t *params = &(last_req->kr_params);
	kcf_areq_node_t *areq = last_req->kr_areq;
	crypto_dual_data_t *ct;

	switch (params->rp_opgrp) {
	case KCF_OG_MAC: {
		kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);

		ct = (crypto_dual_data_t *)mops->mo_data;
		break;
	}
	case KCF_OG_DECRYPT: {
		kcf_decrypt_ops_params_t *dcrops =
		    &(params->rp_u.decrypt_params);

		ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
		break;
	}
	}
	ct->dd_offset1 = last_req->kr_saveoffset;
	ct->dd_len1 = last_req->kr_savelen;

	/* The submitter used kcf_last_req as its callback */
	if (areq == NULL) {
		crypto_call_req_t *cr = &last_req->kr_callreq;

		(*(cr->cr_callback_func))(cr->cr_callback_arg, status);
		kmem_free(last_req, sizeof (kcf_dual_req_t));
		return;
	}

	areq->an_reqarg = last_req->kr_callreq;
	KCF_AREQ_REFRELE(areq);
	kmem_free(last_req, sizeof (kcf_dual_req_t));
	areq->an_isdual = B_FALSE;
	kcf_aop_done(areq, status);
}