/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file contains the core framework routines for the
 * kernel cryptographic framework. These routines sit at the layer
 * between the kernel API/ioctls and the SPI.
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/cpupart.h>
#include <sys/ksynch.h>
#include <sys/callb.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/kstat.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/taskq_impl.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>


kcf_global_swq_t *gswq;    /* Global software queue */

/* Thread pool related variables */
static kcf_pool_t *kcfpool;    /* Thread pool of kcfd LWPs */
int kcf_maxthreads = 2;
int kcf_minthreads = 1;
int kcf_thr_multiple = 2;    /* Boot-time tunable for experimentation */
static ulong_t kcf_idlethr_timeout;
static boolean_t kcf_sched_running = B_FALSE;
#define KCF_DEFAULT_THRTIMEOUT    60000000 /* 60 seconds */
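
/*
 * Note: kcf_minthreads and kcf_maxthreads are only initial values; they
 * are recomputed from the CPU count in compute_min_max_threads() below.
 * A hypothetical /etc/system tuning of the multiplier (assuming this
 * file is built into the "kcf" module) might look like:
 *
 *	set kcf:kcf_thr_multiple = 4
 */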

/* kmem caches used by the scheduler */
static struct kmem_cache *kcf_sreq_cache;
static struct kmem_cache *kcf_areq_cache;
static struct kmem_cache *kcf_context_cache;

/* Global request ID table */
static kcf_reqid_table_t *kcf_reqid_table[REQID_TABLES];

/* KCF stats. Not protected. */
static kcf_stats_t kcf_ksdata = {
    { "total threads in pool",    KSTAT_DATA_UINT32},
    { "idle threads in pool",    KSTAT_DATA_UINT32},
    { "min threads in pool",    KSTAT_DATA_UINT32},
    { "max threads in pool",    KSTAT_DATA_UINT32},
    { "requests in gswq",        KSTAT_DATA_UINT32},
    { "max requests in gswq",    KSTAT_DATA_UINT32},
    { "threads for HW taskq",    KSTAT_DATA_UINT32},
    { "minalloc for HW taskq",    KSTAT_DATA_UINT32},
    { "maxalloc for HW taskq",    KSTAT_DATA_UINT32}
};

static kstat_t *kcf_misc_kstat = NULL;
ulong_t kcf_swprov_hndl = 0;

static kcf_areq_node_t *kcf_areqnode_alloc(kcf_provider_desc_t *,
    kcf_context_t *, crypto_call_req_t *, kcf_req_params_t *, boolean_t);
static int kcf_disp_sw_request(kcf_areq_node_t *);
static void process_req_hwp(void *);
static kcf_areq_node_t *kcf_dequeue();
static int kcf_enqueue(kcf_areq_node_t *);
static void kcf_failover_thread();
static void kcfpool_alloc();
static void kcf_reqid_delete(kcf_areq_node_t *areq);
static crypto_req_id_t kcf_reqid_insert(kcf_areq_node_t *areq);
static int kcf_misc_kstat_update(kstat_t *ksp, int rw);
static void compute_min_max_threads();


/*
 * Create a new context.
 */
crypto_ctx_t *
kcf_new_ctx(crypto_call_req_t *crq, kcf_provider_desc_t *pd,
    crypto_session_id_t sid)
{
    crypto_ctx_t *ctx;
    kcf_context_t *kcf_ctx;

    kcf_ctx = kmem_cache_alloc(kcf_context_cache,
        (crq == NULL) ? KM_SLEEP : KM_NOSLEEP);
    if (kcf_ctx == NULL)
        return (NULL);

    /* initialize the context for the consumer */
    kcf_ctx->kc_refcnt = 1;
    kcf_ctx->kc_req_chain_first = NULL;
    kcf_ctx->kc_req_chain_last = NULL;
    kcf_ctx->kc_secondctx = NULL;
    KCF_PROV_REFHOLD(pd);
    kcf_ctx->kc_prov_desc = pd;
    kcf_ctx->kc_sw_prov_desc = NULL;
    kcf_ctx->kc_mech = NULL;

    ctx = &kcf_ctx->kc_glbl_ctx;
    ctx->cc_provider = pd->pd_prov_handle;
    ctx->cc_session = sid;
    ctx->cc_provider_private = NULL;
    ctx->cc_framework_private = (void *)kcf_ctx;
    ctx->cc_flags = 0;
    ctx->cc_opstate = NULL;

    return (ctx);
}
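
/*
 * Illustrative sketch (not part of this file; wrapper and parameter
 * names are examples only): a framework caller is expected to allocate
 * a context and then submit the init request itself, roughly as:
 *
 *	ctx = kcf_new_ctx(crq, pd, sid);
 *	if (ctx == NULL)
 *		return (CRYPTO_HOST_MEMORY);
 *	KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_INIT, sid, mech,
 *	    NULL, NULL, NULL);
 *	error = kcf_submit_request(pd, ctx, crq, &params, B_FALSE);
 *
 * Note that the kmem_cache_alloc() above may sleep only when crq is
 * NULL, i.e. for synchronous callers.
 */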

/*
 * Allocate a new async request node.
 *
 * ictx - Framework private context pointer
 * crq - Has the callback function and argument. Must be non-NULL.
 * req - The parameters to pass to the SPI
 */
static kcf_areq_node_t *
kcf_areqnode_alloc(kcf_provider_desc_t *pd, kcf_context_t *ictx,
    crypto_call_req_t *crq, kcf_req_params_t *req, boolean_t isdual)
{
    kcf_areq_node_t *arptr, *areq;

    ASSERT(crq != NULL);
    arptr = kmem_cache_alloc(kcf_areq_cache, KM_NOSLEEP);
    if (arptr == NULL)
        return (NULL);

    arptr->an_state = REQ_ALLOCATED;
    arptr->an_reqarg = *crq;
    arptr->an_params = *req;
    arptr->an_context = ictx;
    arptr->an_isdual = isdual;

    arptr->an_next = arptr->an_prev = NULL;
    KCF_PROV_REFHOLD(pd);
    arptr->an_provider = pd;
    arptr->an_tried_plist = NULL;
    arptr->an_refcnt = 1;
    arptr->an_idnext = arptr->an_idprev = NULL;

    /*
     * Requests for context-less operations do not use the
     * an_is_my_turn and an_ctxchain_next fields.
     */
    if (ictx == NULL)
        return (arptr);

    KCF_CONTEXT_REFHOLD(ictx);
    /*
     * Chain this request to the context.
     */
    mutex_enter(&ictx->kc_in_use_lock);
    arptr->an_ctxchain_next = NULL;
    if ((areq = ictx->kc_req_chain_last) == NULL) {
        arptr->an_is_my_turn = B_TRUE;
        ictx->kc_req_chain_last =
            ictx->kc_req_chain_first = arptr;
    } else {
        ASSERT(ictx->kc_req_chain_first != NULL);
        arptr->an_is_my_turn = B_FALSE;
        /* Insert the new request to the end of the chain. */
        areq->an_ctxchain_next = arptr;
        ictx->kc_req_chain_last = arptr;
    }
    mutex_exit(&ictx->kc_in_use_lock);

    return (arptr);
}

/*
 * Queue the request node and do one of the following:
 *	- If there is an idle thread signal it to run.
 *	- If there is no idle thread and max running threads is not
 *	  reached, signal the creator thread for more threads.
 *
 * If the two conditions above are not met, we don't need to do
 * anything. The request will be picked up by one of the
 * worker threads when one becomes available.
 */
static int
kcf_disp_sw_request(kcf_areq_node_t *areq)
{
    int err;
    int cnt = 0;

    if ((err = kcf_enqueue(areq)) != 0)
        return (err);

    if (kcfpool->kp_idlethreads > 0) {
        /* Signal an idle thread to run */
        mutex_enter(&gswq->gs_lock);
        cv_signal(&gswq->gs_cv);
        mutex_exit(&gswq->gs_lock);

        return (CRYPTO_QUEUED);
    }

    /*
     * We keep the number of running threads at kcf_minthreads
     * to reduce gs_lock contention.
     */
    cnt = kcf_minthreads -
        (kcfpool->kp_threads - kcfpool->kp_blockedthreads);
    if (cnt > 0) {
        /*
         * The following ensures the number of threads in pool
         * does not exceed kcf_maxthreads.
         */
        cnt = min(cnt, kcf_maxthreads - kcfpool->kp_threads);
        if (cnt > 0) {
            /* Signal the creator thread for more threads */
            mutex_enter(&kcfpool->kp_user_lock);
            if (!kcfpool->kp_signal_create_thread) {
                kcfpool->kp_signal_create_thread = B_TRUE;
                kcfpool->kp_nthrs = cnt;
                cv_signal(&kcfpool->kp_user_cv);
            }
            mutex_exit(&kcfpool->kp_user_lock);
        }
    }

    return (CRYPTO_QUEUED);
}
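
/*
 * Worked example for the arithmetic in kcf_disp_sw_request(): with
 * kcf_minthreads = 4, kp_threads = 5 and kp_blockedthreads = 3, only
 * two threads are effectively running, so cnt = 4 - (5 - 3) = 2 more
 * threads are wanted. The min() against (kcf_maxthreads - kp_threads)
 * then caps the request so the pool never grows past kcf_maxthreads.
 */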

/*
 * This routine is called by the taskq associated with
 * each hardware provider. We notify the kernel consumer
 * via the callback routine in case of CRYPTO_SUCCESS or
 * a failure.
 *
 * A request can be of type kcf_areq_node_t or of type
 * kcf_sreq_node_t.
 */
static void
process_req_hwp(void *ireq)
{
    int error = 0;
    crypto_ctx_t *ctx;
    kcf_call_type_t ctype;
    kcf_provider_desc_t *pd;
    kcf_areq_node_t *areq = (kcf_areq_node_t *)ireq;
    kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)ireq;
    kcf_prov_cpu_t *mp;

    pd = ((ctype = GET_REQ_TYPE(ireq)) == CRYPTO_SYNCH) ?
        sreq->sn_provider : areq->an_provider;

    /*
     * Wait if flow control is in effect for the provider. A
     * CRYPTO_PROVIDER_READY or CRYPTO_PROVIDER_FAILED
     * notification will signal us. We also get signaled if
     * the provider is unregistering.
     */
    if (pd->pd_state == KCF_PROV_BUSY) {
        mutex_enter(&pd->pd_lock);
        while (pd->pd_state == KCF_PROV_BUSY)
            cv_wait(&pd->pd_resume_cv, &pd->pd_lock);
        mutex_exit(&pd->pd_lock);
    }

    /*
     * Bump the internal reference count while the request is being
     * processed. This is how we know when it's safe to unregister
     * a provider. This step must precede the pd_state check below.
     */
    mp = &(pd->pd_percpu_bins[CPU_SEQID]);
    KCF_PROV_JOB_HOLD(mp);

    /*
     * Fail the request if the provider has failed. We return a
     * recoverable error and the notified clients attempt any
     * recovery. For async clients this is done in kcf_aop_done()
     * and for sync clients it is done in the k-api routines.
     */
    if (pd->pd_state >= KCF_PROV_FAILED) {
        error = CRYPTO_DEVICE_ERROR;
        goto bail;
    }

    if (ctype == CRYPTO_SYNCH) {
        mutex_enter(&sreq->sn_lock);
        sreq->sn_state = REQ_INPROGRESS;
        sreq->sn_mp = mp;
        mutex_exit(&sreq->sn_lock);

        ctx = sreq->sn_context ? &sreq->sn_context->kc_glbl_ctx : NULL;
        error = common_submit_request(sreq->sn_provider, ctx,
            sreq->sn_params, sreq);
    } else {
        kcf_context_t *ictx;
        ASSERT(ctype == CRYPTO_ASYNCH);

        /*
         * We are in the per-hardware provider thread context and
         * hence can sleep. Note that the caller would have done
         * a taskq_dispatch(..., TQ_NOSLEEP) and would have returned.
         */
        ctx = (ictx = areq->an_context) ? &ictx->kc_glbl_ctx : NULL;

        mutex_enter(&areq->an_lock);
        /*
         * We need to maintain ordering for multi-part requests.
         * an_is_my_turn is set to B_TRUE initially for a request
         * when it is enqueued and there are no other requests
         * for that context. It is set later from kcf_aop_done() when
         * the request before us in the chain of requests for the
         * context completes. We get signaled at that point.
         */
        if (ictx != NULL) {
            ASSERT(ictx->kc_prov_desc == areq->an_provider);

            while (areq->an_is_my_turn == B_FALSE) {
                cv_wait(&areq->an_turn_cv, &areq->an_lock);
            }
        }
        areq->an_state = REQ_INPROGRESS;
        areq->an_mp = mp;
        mutex_exit(&areq->an_lock);

        error = common_submit_request(areq->an_provider, ctx,
            &areq->an_params, areq);
    }

bail:
    if (error == CRYPTO_QUEUED) {
        /*
         * The request is queued by the provider and we should
         * get a crypto_op_notification() from the provider later.
         * We notify the consumer at that time.
         */
        return;
    } else {    /* CRYPTO_SUCCESS or other failure */
        KCF_PROV_JOB_RELE(mp);
        if (ctype == CRYPTO_SYNCH)
            kcf_sop_done(sreq, error);
        else
            kcf_aop_done(areq, error);
    }
}
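
/*
 * A note on GET_REQ_TYPE() as used above: it is assumed (see
 * sched_impl.h) that both kcf_sreq_node_t and kcf_areq_node_t store
 * their kcf_call_type_t (sn_type/an_type, set in the kmem cache
 * constructors later in this file) as the leading member, so a request
 * of either type can be classified before casting to the proper struct.
 */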

/*
 * This routine checks if a request can be retried on another
 * provider. If true, mech1 is initialized to point to the mechanism
 * structure. mech2 is also initialized in case of a dual operation. fg
 * is initialized to the correct crypto_func_group_t bit flag. They are
 * initialized by this routine, so that the caller can pass them to a
 * kcf_get_mech_provider() or kcf_get_dual_provider() with no further change.
 *
 * We check that the request is for an init or atomic routine and that
 * it is for one of the operation groups used from the k-api.
 */
static boolean_t
can_resubmit(kcf_areq_node_t *areq, crypto_mechanism_t **mech1,
    crypto_mechanism_t **mech2, crypto_func_group_t *fg)
{
    kcf_req_params_t *params;
    kcf_op_type_t optype;

    params = &areq->an_params;
    optype = params->rp_optype;

    if (!(IS_INIT_OP(optype) || IS_ATOMIC_OP(optype)))
        return (B_FALSE);

    switch (params->rp_opgrp) {
    case KCF_OG_DIGEST: {
        kcf_digest_ops_params_t *dops = &params->rp_u.digest_params;

        dops->do_mech.cm_type = dops->do_framework_mechtype;
        *mech1 = &dops->do_mech;
        *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DIGEST :
            CRYPTO_FG_DIGEST_ATOMIC;
        break;
    }

    case KCF_OG_MAC: {
        kcf_mac_ops_params_t *mops = &params->rp_u.mac_params;

        mops->mo_mech.cm_type = mops->mo_framework_mechtype;
        *mech1 = &mops->mo_mech;
        *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC :
            CRYPTO_FG_MAC_ATOMIC;
        break;
    }

    case KCF_OG_SIGN: {
        kcf_sign_ops_params_t *sops = &params->rp_u.sign_params;

        sops->so_mech.cm_type = sops->so_framework_mechtype;
        *mech1 = &sops->so_mech;
        switch (optype) {
        case KCF_OP_INIT:
            *fg = CRYPTO_FG_SIGN;
            break;
        case KCF_OP_ATOMIC:
            *fg = CRYPTO_FG_SIGN_ATOMIC;
            break;
        default:
            ASSERT(optype == KCF_OP_SIGN_RECOVER_ATOMIC);
            *fg = CRYPTO_FG_SIGN_RECOVER_ATOMIC;
        }
        break;
    }

    case KCF_OG_VERIFY: {
        kcf_verify_ops_params_t *vops = &params->rp_u.verify_params;

        vops->vo_mech.cm_type = vops->vo_framework_mechtype;
        *mech1 = &vops->vo_mech;
        switch (optype) {
        case KCF_OP_INIT:
            *fg = CRYPTO_FG_VERIFY;
            break;
        case KCF_OP_ATOMIC:
            *fg = CRYPTO_FG_VERIFY_ATOMIC;
            break;
        default:
            ASSERT(optype == KCF_OP_VERIFY_RECOVER_ATOMIC);
            *fg = CRYPTO_FG_VERIFY_RECOVER_ATOMIC;
        }
        break;
    }

    case KCF_OG_ENCRYPT: {
        kcf_encrypt_ops_params_t *eops = &params->rp_u.encrypt_params;

        eops->eo_mech.cm_type = eops->eo_framework_mechtype;
        *mech1 = &eops->eo_mech;
        *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT :
            CRYPTO_FG_ENCRYPT_ATOMIC;
        break;
    }

    case KCF_OG_DECRYPT: {
        kcf_decrypt_ops_params_t *dcrops = &params->rp_u.decrypt_params;

        dcrops->dop_mech.cm_type = dcrops->dop_framework_mechtype;
        *mech1 = &dcrops->dop_mech;
        *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DECRYPT :
            CRYPTO_FG_DECRYPT_ATOMIC;
        break;
    }

    case KCF_OG_ENCRYPT_MAC: {
        kcf_encrypt_mac_ops_params_t *eops =
            &params->rp_u.encrypt_mac_params;

        eops->em_encr_mech.cm_type = eops->em_framework_encr_mechtype;
        *mech1 = &eops->em_encr_mech;
        eops->em_mac_mech.cm_type = eops->em_framework_mac_mechtype;
        *mech2 = &eops->em_mac_mech;
        *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT_MAC :
            CRYPTO_FG_ENCRYPT_MAC_ATOMIC;
        break;
    }

    case KCF_OG_MAC_DECRYPT: {
        kcf_mac_decrypt_ops_params_t *dops =
            &params->rp_u.mac_decrypt_params;

        dops->md_mac_mech.cm_type = dops->md_framework_mac_mechtype;
        *mech1 = &dops->md_mac_mech;
        dops->md_decr_mech.cm_type = dops->md_framework_decr_mechtype;
        *mech2 = &dops->md_decr_mech;
        *fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC_DECRYPT :
            CRYPTO_FG_MAC_DECRYPT_ATOMIC;
        break;
    }

    default:
        return (B_FALSE);
    }

    return (B_TRUE);
}

/*
 * This routine is called when a request to a provider has failed
 * with a recoverable error. This routine tries to find another provider
 * and dispatches the request to the new provider, if one is available.
 * We reuse the request structure.
 *
 * A return value of NULL from kcf_get_mech_provider() indicates
 * we have tried the last provider.
 */
static int
kcf_resubmit_request(kcf_areq_node_t *areq)
{
    int error = CRYPTO_FAILED;
    kcf_context_t *ictx;
    kcf_provider_desc_t *old_pd;
    kcf_provider_desc_t *new_pd;
    crypto_mechanism_t *mech1 = NULL, *mech2 = NULL;
    crypto_mech_type_t prov_mt1, prov_mt2;
    crypto_func_group_t fg;

    if (!can_resubmit(areq, &mech1, &mech2, &fg))
        return (error);

    old_pd = areq->an_provider;
    /*
     * Add old_pd to the list of providers already tried.
     * We release the new hold on old_pd in kcf_free_triedlist().
     */
    if (kcf_insert_triedlist(&areq->an_tried_plist, old_pd,
        KM_NOSLEEP | KCF_HOLD_PROV) == NULL)
        return (error);

    if (mech1 && !mech2) {
        new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, NULL,
            &error, areq->an_tried_plist, fg,
            (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
    } else {
        ASSERT(mech1 != NULL && mech2 != NULL);

        new_pd = kcf_get_dual_provider(mech1, NULL, mech2, NULL,
            NULL, &prov_mt1,
            &prov_mt2, &error, areq->an_tried_plist, fg, fg,
            (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
    }

    if (new_pd == NULL)
        return (error);

    /*
     * We reuse the old context by resetting provider specific
     * fields in it.
     */
    if ((ictx = areq->an_context) != NULL) {
        crypto_ctx_t *ctx;

        ASSERT(old_pd == ictx->kc_prov_desc);
        KCF_PROV_REFRELE(ictx->kc_prov_desc);
        KCF_PROV_REFHOLD(new_pd);
        ictx->kc_prov_desc = new_pd;

        ctx = &ictx->kc_glbl_ctx;
        ctx->cc_provider = new_pd->pd_prov_handle;
        ctx->cc_session = new_pd->pd_sid;
        ctx->cc_provider_private = NULL;
    }

    /* We reuse areq by resetting the provider and context fields. */
    KCF_PROV_REFRELE(old_pd);
    KCF_PROV_REFHOLD(new_pd);
    areq->an_provider = new_pd;
    mutex_enter(&areq->an_lock);
    areq->an_state = REQ_WAITING;
    mutex_exit(&areq->an_lock);

    switch (new_pd->pd_prov_type) {
    case CRYPTO_SW_PROVIDER:
        error = kcf_disp_sw_request(areq);
        break;

    case CRYPTO_HW_PROVIDER: {
        taskq_t *taskq = new_pd->pd_taskq;

        if (taskq_dispatch(taskq, process_req_hwp, areq, TQ_NOSLEEP) ==
            (taskqid_t)0) {
            error = CRYPTO_HOST_MEMORY;
        } else {
            error = CRYPTO_QUEUED;
        }

        break;
    }
    }

    KCF_PROV_REFRELE(new_pd);
    return (error);
}

#define EMPTY_TASKQ(tq)    ((tq)->tq_task.tqent_next == &(tq)->tq_task)
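
/*
 * EMPTY_TASKQ() relies on the taskq entry list being circular and
 * anchored at tq_task: when the only node in the ring is the anchor
 * itself (tqent_next points back to &tq->tq_task), no entries are
 * pending. This peeks at taskq internals (hence the taskq_impl.h
 * include) and is only a hint; the queue can change right after the
 * check, which is acceptable for the optimizations below.
 */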

/*
 * Routine called by both ioctl and k-api. The consumer should
 * bundle the parameters into a kcf_req_params_t structure. A bunch
 * of macros are available in ops_impl.h for this bundling. They are:
 *
 *	KCF_WRAP_DIGEST_OPS_PARAMS()
 *	KCF_WRAP_MAC_OPS_PARAMS()
 *	KCF_WRAP_ENCRYPT_OPS_PARAMS()
 *	KCF_WRAP_DECRYPT_OPS_PARAMS() ... etc.
 *
 * It is the caller's responsibility to free the ctx argument when
 * appropriate. See the KCF_CONTEXT_COND_RELEASE macro for details.
 */
int
kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
    crypto_call_req_t *crq, kcf_req_params_t *params, boolean_t cont)
{
    int error;
    kcf_areq_node_t *areq;
    kcf_sreq_node_t *sreq;
    kcf_context_t *kcf_ctx;
    taskq_t *taskq;
    kcf_prov_cpu_t *mp;

    kcf_ctx = ctx ? (kcf_context_t *)ctx->cc_framework_private : NULL;

    /* Synchronous cases */
    if (crq == NULL) {
        switch (pd->pd_prov_type) {
        case CRYPTO_SW_PROVIDER:
            error = common_submit_request(pd, ctx, params,
                KCF_RHNDL(KM_SLEEP));
            break;

        case CRYPTO_HW_PROVIDER:
            taskq = pd->pd_taskq;

            /*
             * Special case for CRYPTO_SYNCHRONOUS providers that
             * never return a CRYPTO_QUEUED error. We skip any
             * request allocation and call the SPI directly.
             */
            if ((pd->pd_flags & CRYPTO_SYNCHRONOUS) &&
                EMPTY_TASKQ(taskq)) {
                mp = &(pd->pd_percpu_bins[CPU_SEQID]);
                KCF_PROV_JOB_HOLD(mp);

                if (pd->pd_state == KCF_PROV_READY) {
                    error = common_submit_request(pd, ctx,
                        params, KCF_RHNDL(KM_SLEEP));
                    KCF_PROV_JOB_RELE(mp);
                    ASSERT(error != CRYPTO_QUEUED);
                    break;
                }
                KCF_PROV_JOB_RELE(mp);
            }

            sreq = kmem_cache_alloc(kcf_sreq_cache, KM_SLEEP);
            sreq->sn_state = REQ_ALLOCATED;
            sreq->sn_rv = CRYPTO_FAILED;
            sreq->sn_params = params;

            /*
             * Note that we do not need to hold the context
             * for the synchronous case as the context will never
             * become invalid underneath us. We do not need to hold
             * the provider here either as the caller has a hold.
             */
            sreq->sn_context = kcf_ctx;
            ASSERT(KCF_PROV_REFHELD(pd));
            sreq->sn_provider = pd;

            ASSERT(taskq != NULL);
            /*
             * Call the SPI directly if the taskq is empty and the
             * provider is not busy, else dispatch to the taskq.
             * Calling directly is fine as this is the synchronous
             * case. This is unlike the asynchronous case where we
             * must always dispatch to the taskq.
             */
            if (EMPTY_TASKQ(taskq) &&
                pd->pd_state == KCF_PROV_READY) {
                process_req_hwp(sreq);
            } else {
                /*
                 * We cannot tell from the taskq_dispatch()
                 * return value if we exceeded maxalloc. Hence
                 * the check here. Since we are allowed to wait
                 * in the synchronous case, we wait for the
                 * taskq to become empty.
                 */
                if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
                    taskq_wait(taskq);
                }

                (void) taskq_dispatch(taskq, process_req_hwp,
                    sreq, TQ_SLEEP);
            }

            /*
             * Wait for the notification to arrive,
             * if the operation is not done yet.
             * Bug# 4722589 will make the wait a cv_wait_sig().
             */
            mutex_enter(&sreq->sn_lock);
            while (sreq->sn_state < REQ_DONE)
                cv_wait(&sreq->sn_cv, &sreq->sn_lock);
            mutex_exit(&sreq->sn_lock);

            error = sreq->sn_rv;
            kmem_cache_free(kcf_sreq_cache, sreq);

            break;

        default:
            error = CRYPTO_FAILED;
            break;
        }

    } else {    /* Asynchronous cases */
        switch (pd->pd_prov_type) {
        case CRYPTO_SW_PROVIDER:
            if (!(crq->cr_flag & CRYPTO_ALWAYS_QUEUE)) {
                /*
                 * This case has less overhead since there is
                 * no switching of context.
                 */
                error = common_submit_request(pd, ctx, params,
                    KCF_RHNDL(KM_NOSLEEP));
            } else {
                /*
                 * CRYPTO_ALWAYS_QUEUE is set. We need to
                 * queue the request and return.
                 */
                areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
                    params, cont);
                if (areq == NULL)
                    error = CRYPTO_HOST_MEMORY;
                else {
                    if (!(crq->cr_flag &
                        CRYPTO_SKIP_REQID)) {
                        /*
                         * Set the request handle. This
                         * handle is used for any
                         * crypto_cancel_req(9f) calls
                         * from the consumer. We have to
                         * do this before dispatching
                         * the request.
                         */
                        crq->cr_reqid =
                            kcf_reqid_insert(areq);
                    }

                    error = kcf_disp_sw_request(areq);
                    /*
                     * If there was an error processing
                     * this request, remove the handle and
                     * release the request structure.
                     */
                    if (error != CRYPTO_QUEUED) {
                        if (!(crq->cr_flag &
                            CRYPTO_SKIP_REQID))
                            kcf_reqid_delete(areq);
                        KCF_AREQ_REFRELE(areq);
                    }
                }
            }
            break;

        case CRYPTO_HW_PROVIDER:
            /*
             * We need to queue the request and return.
             */
            areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params,
                cont);
            if (areq == NULL) {
                error = CRYPTO_HOST_MEMORY;
                goto done;
            }

            taskq = pd->pd_taskq;
            ASSERT(taskq != NULL);
            /*
             * We cannot tell from the taskq_dispatch() return
             * value if we exceeded maxalloc. Hence the check
             * here.
             */
            if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
                error = CRYPTO_BUSY;
                KCF_AREQ_REFRELE(areq);
                goto done;
            }

            if (!(crq->cr_flag & CRYPTO_SKIP_REQID)) {
                /*
                 * Set the request handle. This handle is used
                 * for any crypto_cancel_req(9f) calls from the
                 * consumer. We have to do this before
                 * dispatching the request.
                 */
                crq->cr_reqid = kcf_reqid_insert(areq);
            }

            if (taskq_dispatch(taskq,
                process_req_hwp, areq, TQ_NOSLEEP) ==
                (taskqid_t)0) {
                error = CRYPTO_HOST_MEMORY;
                if (!(crq->cr_flag & CRYPTO_SKIP_REQID))
                    kcf_reqid_delete(areq);
                KCF_AREQ_REFRELE(areq);
            } else {
                error = CRYPTO_QUEUED;
            }
            break;

        default:
            error = CRYPTO_FAILED;
            break;
        }
    }

done:
    return (error);
}
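
/*
 * Illustrative sketch (hypothetical names): an asynchronous MAC atomic
 * request from a kernel consumer would reach the routine above roughly
 * as:
 *
 *	crypto_call_req_t cr;
 *
 *	cr.cr_flag = CRYPTO_ALWAYS_QUEUE;
 *	cr.cr_callback_func = my_done;	(invoked via kcf_aop_done())
 *	cr.cr_callback_arg = my_arg;
 *	KCF_WRAP_MAC_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech, key,
 *	    data, mac, tmpl);
 *	error = kcf_submit_request(pd, NULL, &cr, &params, B_FALSE);
 *
 * A CRYPTO_QUEUED return means the callback, not the return value,
 * carries the operation's final status.
 */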

/*
 * We're done with this framework context, so free it. Note that freeing
 * framework context (kcf_context) frees the global context (crypto_ctx).
 *
 * The provider is responsible for freeing provider private context after a
 * final or single operation and resetting the cc_provider_private field
 * to NULL. It should do this before it notifies the framework of the
 * completion. We still need to call KCF_PROV_FREE_CONTEXT to handle cases
 * like crypto_cancel_ctx(9f).
 */
void
kcf_free_context(kcf_context_t *kcf_ctx)
{
    kcf_provider_desc_t *pd = kcf_ctx->kc_prov_desc;
    crypto_ctx_t *gctx = &kcf_ctx->kc_glbl_ctx;
    kcf_context_t *kcf_secondctx = kcf_ctx->kc_secondctx;
    kcf_prov_cpu_t *mp;

    /* Release the second context, if any */

    if (kcf_secondctx != NULL)
        KCF_CONTEXT_REFRELE(kcf_secondctx);

    if (gctx->cc_provider_private != NULL) {
        mutex_enter(&pd->pd_lock);
        if (!KCF_IS_PROV_REMOVED(pd)) {
            /*
             * Increment the provider's internal refcnt so it
             * doesn't unregister from the framework while
             * we're calling the entry point.
             */
            mp = &(pd->pd_percpu_bins[CPU_SEQID]);
            KCF_PROV_JOB_HOLD(mp);
            mutex_exit(&pd->pd_lock);
            (void) KCF_PROV_FREE_CONTEXT(pd, gctx);
            KCF_PROV_JOB_RELE(mp);
        } else {
            mutex_exit(&pd->pd_lock);
        }
    }

    /* kcf_ctx->kc_prov_desc has a hold on pd */
    KCF_PROV_REFRELE(kcf_ctx->kc_prov_desc);

    /* check if this context is shared with a software provider */
    if ((gctx->cc_flags & CRYPTO_INIT_OPSTATE) &&
        kcf_ctx->kc_sw_prov_desc != NULL) {
        KCF_PROV_REFRELE(kcf_ctx->kc_sw_prov_desc);
    }

    kmem_cache_free(kcf_context_cache, kcf_ctx);
}

/*
 * Free the request after releasing all the holds.
 */
void
kcf_free_req(kcf_areq_node_t *areq)
{
    KCF_PROV_REFRELE(areq->an_provider);
    if (areq->an_context != NULL)
        KCF_CONTEXT_REFRELE(areq->an_context);

    if (areq->an_tried_plist != NULL)
        kcf_free_triedlist(areq->an_tried_plist);
    kmem_cache_free(kcf_areq_cache, areq);
}

/*
 * Utility routine to remove a request from the chain of requests
 * hanging off a context.
 */
void
kcf_removereq_in_ctxchain(kcf_context_t *ictx, kcf_areq_node_t *areq)
{
    kcf_areq_node_t *cur, *prev;

    /*
     * Get context lock, search for areq in the chain and remove it.
     */
    ASSERT(ictx != NULL);
    mutex_enter(&ictx->kc_in_use_lock);
    prev = cur = ictx->kc_req_chain_first;

    while (cur != NULL) {
        if (cur == areq) {
            if (prev == cur) {
                if ((ictx->kc_req_chain_first =
                    cur->an_ctxchain_next) == NULL)
                    ictx->kc_req_chain_last = NULL;
            } else {
                if (cur == ictx->kc_req_chain_last)
                    ictx->kc_req_chain_last = prev;
                prev->an_ctxchain_next = cur->an_ctxchain_next;
            }

            break;
        }
        prev = cur;
        cur = cur->an_ctxchain_next;
    }
    mutex_exit(&ictx->kc_in_use_lock);
}

/*
 * Remove the specified node from the global software queue.
 *
 * The caller must hold the queue lock and request lock (an_lock).
 */
void
kcf_remove_node(kcf_areq_node_t *node)
{
    kcf_areq_node_t *nextp = node->an_next;
    kcf_areq_node_t *prevp = node->an_prev;

    ASSERT(mutex_owned(&gswq->gs_lock));

    if (nextp != NULL)
        nextp->an_prev = prevp;
    else
        gswq->gs_last = prevp;

    if (prevp != NULL)
        prevp->an_next = nextp;
    else
        gswq->gs_first = nextp;

    ASSERT(mutex_owned(&node->an_lock));
    node->an_state = REQ_CANCELED;
}

/*
 * Remove and return the first node in the global software queue.
 *
 * The caller must hold the queue lock.
 */
static kcf_areq_node_t *
kcf_dequeue()
{
    kcf_areq_node_t *tnode = NULL;

    ASSERT(mutex_owned(&gswq->gs_lock));
    if ((tnode = gswq->gs_first) == NULL) {
        return (NULL);
    } else {
        ASSERT(gswq->gs_first->an_prev == NULL);
        gswq->gs_first = tnode->an_next;
        if (tnode->an_next == NULL)
            gswq->gs_last = NULL;
        else
            tnode->an_next->an_prev = NULL;
    }

    gswq->gs_njobs--;
    return (tnode);
}

/*
 * Add the request node to the end of the global software queue.
 *
 * The caller should not hold the queue lock. Returns 0 if the
 * request is successfully queued. Returns CRYPTO_BUSY if the limit
 * on the number of jobs is exceeded.
 */
static int
kcf_enqueue(kcf_areq_node_t *node)
{
    kcf_areq_node_t *tnode;

    mutex_enter(&gswq->gs_lock);

    if (gswq->gs_njobs >= gswq->gs_maxjobs) {
        mutex_exit(&gswq->gs_lock);
        return (CRYPTO_BUSY);
    }

    if (gswq->gs_last == NULL) {
        gswq->gs_first = gswq->gs_last = node;
    } else {
        ASSERT(gswq->gs_last->an_next == NULL);
        tnode = gswq->gs_last;
        tnode->an_next = node;
        gswq->gs_last = node;
        node->an_prev = tnode;
    }

    gswq->gs_njobs++;

    /* an_lock not needed here as we hold gs_lock */
    node->an_state = REQ_WAITING;

    mutex_exit(&gswq->gs_lock);

    return (0);
}

/*
 * Decrement the thread pool count and signal the failover
 * thread if we are the last one out.
 */
static void
kcf_decrcnt_andsignal()
{
    KCF_ATOMIC_DECR(kcfpool->kp_threads);

    mutex_enter(&kcfpool->kp_thread_lock);
    if (kcfpool->kp_threads == 0)
        cv_signal(&kcfpool->kp_nothr_cv);
    mutex_exit(&kcfpool->kp_thread_lock);
}

/*
 * Function run by a thread from kcfpool to work on global software queue.
 * It is called from ioctl(CRYPTO_POOL_RUN, ...).
 */
int
kcf_svc_do_run(void)
{
    int error = 0;
    clock_t rv;
    clock_t timeout_val = drv_usectohz(kcf_idlethr_timeout);
    kcf_areq_node_t *req;
    kcf_context_t *ictx;
    kcf_provider_desc_t *pd;

    KCF_ATOMIC_INCR(kcfpool->kp_threads);

    for (;;) {
        mutex_enter(&gswq->gs_lock);

        while ((req = kcf_dequeue()) == NULL) {
            KCF_ATOMIC_INCR(kcfpool->kp_idlethreads);
            rv = cv_reltimedwait_sig(&gswq->gs_cv,
                &gswq->gs_lock, timeout_val, TR_CLOCK_TICK);
            KCF_ATOMIC_DECR(kcfpool->kp_idlethreads);

            switch (rv) {
            case 0:
                /*
                 * A signal (as in kill(2)) is pending. We did
                 * not get any cv_signal().
                 */
                kcf_decrcnt_andsignal();
                mutex_exit(&gswq->gs_lock);
                return (EINTR);

            case -1:
                /*
                 * Timed out and we are not signaled. Let us
                 * see if this thread should exit. We should
                 * keep at least kcf_minthreads.
                 */
                if (kcfpool->kp_threads > kcf_minthreads) {
                    kcf_decrcnt_andsignal();
                    mutex_exit(&gswq->gs_lock);
                    return (0);
                }

                /* Resume the wait for work */
                break;

            default:
                /*
                 * We are signaled to work on the queue.
                 */
                break;
            }
        }

        mutex_exit(&gswq->gs_lock);

        ictx = req->an_context;
        if (ictx == NULL) {    /* Context-less operation */
            pd = req->an_provider;
            error = common_submit_request(pd, NULL,
                &req->an_params, req);
            kcf_aop_done(req, error);
            continue;
        }

        /*
         * We check if we can work on the request now.
         * Solaris does not guarantee any order on how the threads
         * are scheduled or how the waiters on a mutex are chosen.
         * So, we need to maintain our own order.
         *
         * is_my_turn is set to B_TRUE initially for a request when
         * it is enqueued and there are no other requests
         * for that context. Note that a thread sleeping on
         * an_turn_cv is not counted as an idle thread. This is
         * because we define an idle thread as one that sleeps on the
         * global queue waiting for new requests.
         */
        mutex_enter(&req->an_lock);
        while (req->an_is_my_turn == B_FALSE) {
            KCF_ATOMIC_INCR(kcfpool->kp_blockedthreads);
            cv_wait(&req->an_turn_cv, &req->an_lock);
            KCF_ATOMIC_DECR(kcfpool->kp_blockedthreads);
        }

        req->an_state = REQ_INPROGRESS;
        mutex_exit(&req->an_lock);

        pd = ictx->kc_prov_desc;
        ASSERT(pd == req->an_provider);
        error = common_submit_request(pd, &ictx->kc_glbl_ctx,
            &req->an_params, req);

        kcf_aop_done(req, error);
    }
}
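
/*
 * The switch above follows the condvar(9F) return convention for
 * cv_reltimedwait_sig(): 0 means the wait was interrupted by a signal,
 * -1 means the timeout expired, and a positive value means we were
 * woken by cv_signal()/cv_broadcast().
 */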

/*
 * kmem_cache_alloc constructor for sync request structure.
 */
/* ARGSUSED */
static int
kcf_sreq_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
    kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;

    sreq->sn_type = CRYPTO_SYNCH;
    cv_init(&sreq->sn_cv, NULL, CV_DEFAULT, NULL);
    mutex_init(&sreq->sn_lock, NULL, MUTEX_DEFAULT, NULL);

    return (0);
}

/* ARGSUSED */
static void
kcf_sreq_cache_destructor(void *buf, void *cdrarg)
{
    kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;

    mutex_destroy(&sreq->sn_lock);
    cv_destroy(&sreq->sn_cv);
}

/*
 * kmem_cache_alloc constructor for async request structure.
 */
/* ARGSUSED */
static int
kcf_areq_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
    kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;

    areq->an_type = CRYPTO_ASYNCH;
    areq->an_refcnt = 0;
    mutex_init(&areq->an_lock, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&areq->an_done, NULL, CV_DEFAULT, NULL);
    cv_init(&areq->an_turn_cv, NULL, CV_DEFAULT, NULL);

    return (0);
}

/* ARGSUSED */
static void
kcf_areq_cache_destructor(void *buf, void *cdrarg)
{
    kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;

    ASSERT(areq->an_refcnt == 0);
    mutex_destroy(&areq->an_lock);
    cv_destroy(&areq->an_done);
    cv_destroy(&areq->an_turn_cv);
}

/*
 * kmem_cache_alloc constructor for kcf_context structure.
 */
/* ARGSUSED */
static int
kcf_context_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
    kcf_context_t *kctx = (kcf_context_t *)buf;

    kctx->kc_refcnt = 0;
    mutex_init(&kctx->kc_in_use_lock, NULL, MUTEX_DEFAULT, NULL);

    return (0);
}

/* ARGSUSED */
static void
kcf_context_cache_destructor(void *buf, void *cdrarg)
{
    kcf_context_t *kctx = (kcf_context_t *)buf;

    ASSERT(kctx->kc_refcnt == 0);
    mutex_destroy(&kctx->kc_in_use_lock);
}
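
/*
 * Note on the constructors/destructors above: kmem cache constructors
 * run when an object is first built for a slab, not on every
 * kmem_cache_alloc(). The mutexes and condition variables therefore
 * persist across alloc/free cycles, which is why callers such as
 * kcf_new_ctx() re-initialize only the plain fields (refcounts,
 * pointers) and never the locks.
 */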

/*
 * Creates and initializes all the structures needed by the framework.
 */
void
kcf_sched_init(void)
{
    int i;
    kcf_reqid_table_t *rt;

    /*
     * Create all the kmem caches needed by the framework. We set the
     * align argument to 64 so that each slab is 64-byte aligned and the
     * objects (cache_chunksize) are a multiple of 64 bytes. This helps
     * to avoid false sharing, as 64 bytes is the size of the CPU cache
     * line.
     */
    kcf_sreq_cache = kmem_cache_create("kcf_sreq_cache",
        sizeof (struct kcf_sreq_node), 64, kcf_sreq_cache_constructor,
        kcf_sreq_cache_destructor, NULL, NULL, NULL, 0);

    kcf_areq_cache = kmem_cache_create("kcf_areq_cache",
        sizeof (struct kcf_areq_node), 64, kcf_areq_cache_constructor,
        kcf_areq_cache_destructor, NULL, NULL, NULL, 0);

    kcf_context_cache = kmem_cache_create("kcf_context_cache",
        sizeof (struct kcf_context), 64, kcf_context_cache_constructor,
        kcf_context_cache_destructor, NULL, NULL, NULL, 0);

    mutex_init(&kcf_dh_lock, NULL, MUTEX_DEFAULT, NULL);

    gswq = kmem_alloc(sizeof (kcf_global_swq_t), KM_SLEEP);

    mutex_init(&gswq->gs_lock, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&gswq->gs_cv, NULL, CV_DEFAULT, NULL);
    gswq->gs_njobs = 0;
    gswq->gs_maxjobs = kcf_maxthreads * crypto_taskq_maxalloc;
    gswq->gs_first = gswq->gs_last = NULL;

    /* Initialize the global reqid table */
    for (i = 0; i < REQID_TABLES; i++) {
        rt = kmem_zalloc(sizeof (kcf_reqid_table_t), KM_SLEEP);
        kcf_reqid_table[i] = rt;
        mutex_init(&rt->rt_lock, NULL, MUTEX_DEFAULT, NULL);
        rt->rt_curid = i;
    }

    /* Allocate and initialize the thread pool */
    kcfpool_alloc();

    /* Initialize the event notification list variables */
    mutex_init(&ntfy_list_lock, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&ntfy_list_cv, NULL, CV_DEFAULT, NULL);

    /* Initialize the crypto_bufcall list variables */
    mutex_init(&cbuf_list_lock, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&cbuf_list_cv, NULL, CV_DEFAULT, NULL);

    /* Create the kcf kstat */
    kcf_misc_kstat = kstat_create("kcf", 0, "framework_stats", "crypto",
        KSTAT_TYPE_NAMED, sizeof (kcf_stats_t) / sizeof (kstat_named_t),
        KSTAT_FLAG_VIRTUAL);

    if (kcf_misc_kstat != NULL) {
        kcf_misc_kstat->ks_data = &kcf_ksdata;
        kcf_misc_kstat->ks_update = kcf_misc_kstat_update;
        kstat_install(kcf_misc_kstat);
    }
}

/*
 * This routine should only be called by drv/cryptoadm.
 *
 * The kcf_sched_running flag isn't protected by a lock. But, we are safe
 * because the first thread ("cryptoadm refresh") calling this routine during
 * boot time completes before any other thread that can call this routine.
 */
void
kcf_sched_start(void)
{
    if (kcf_sched_running)
        return;

    /* Start the failover kernel thread for now */
    (void) thread_create(NULL, 0, &kcf_failover_thread, 0, 0, &p0,
        TS_RUN, minclsyspri);

    /* Start the background processing thread. */
    (void) thread_create(NULL, 0, &crypto_bufcall_service, 0, 0, &p0,
        TS_RUN, minclsyspri);

    kcf_sched_running = B_TRUE;
}

/*
 * Signal the waiting sync client.
 */
void
kcf_sop_done(kcf_sreq_node_t *sreq, int error)
{
    mutex_enter(&sreq->sn_lock);
    sreq->sn_state = REQ_DONE;
    sreq->sn_rv = error;
    cv_signal(&sreq->sn_cv);
    mutex_exit(&sreq->sn_lock);
}
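
/*
 * kcf_sop_done() above pairs with the synchronous wait loop in
 * kcf_submit_request(): setting sn_state to REQ_DONE under sn_lock and
 * signaling sn_cv is what releases the consumer thread blocked in
 * "while (sreq->sn_state < REQ_DONE)".
 */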

/*
 * Callback the async client with the operation status.
 * We free the async request node and possibly the context.
 * We also handle any chain of requests hanging off of
 * the context.
 */
void
kcf_aop_done(kcf_areq_node_t *areq, int error)
{
    kcf_op_type_t optype;
    boolean_t skip_notify = B_FALSE;
    kcf_context_t *ictx;
    kcf_areq_node_t *nextreq;

    /*
     * Handle recoverable errors. This has to be done first
     * before doing anything else in this routine so that
     * we do not change the state of the request.
     */
    if (error != CRYPTO_SUCCESS && IS_RECOVERABLE(error)) {
        /*
         * We try another provider, if one is available. Else
         * we continue with the failure notification to the
         * client.
         */
        if (kcf_resubmit_request(areq) == CRYPTO_QUEUED)
            return;
    }

    mutex_enter(&areq->an_lock);
    areq->an_state = REQ_DONE;
    mutex_exit(&areq->an_lock);

    optype = (&areq->an_params)->rp_optype;
    if ((ictx = areq->an_context) != NULL) {
        /*
         * After a request is removed from the request queue, it
         * still stays on the chain of requests hanging off its
         * context structure. It needs to be removed from this
         * chain at this point.
         */
        mutex_enter(&ictx->kc_in_use_lock);
        nextreq = areq->an_ctxchain_next;
        if (nextreq != NULL) {
            mutex_enter(&nextreq->an_lock);
            nextreq->an_is_my_turn = B_TRUE;
            cv_signal(&nextreq->an_turn_cv);
            mutex_exit(&nextreq->an_lock);
        }

        ictx->kc_req_chain_first = nextreq;
        if (nextreq == NULL)
            ictx->kc_req_chain_last = NULL;
        mutex_exit(&ictx->kc_in_use_lock);

        if (IS_SINGLE_OP(optype) || IS_FINAL_OP(optype)) {
            ASSERT(nextreq == NULL);
            KCF_CONTEXT_REFRELE(ictx);
        } else if (error != CRYPTO_SUCCESS && IS_INIT_OP(optype)) {
            /*
             * NOTE - We do not release the context in case of
             * update operations. We require the consumer to free
             * it explicitly, in case it wants to abandon an
             * update operation. This is done as there may be
             * mechanisms in ECB mode that can continue even if
             * an operation on a block fails.
             */
            KCF_CONTEXT_REFRELE(ictx);
        }
    }

    /* Deal with the internal continuation to this request first */

    if (areq->an_isdual) {
        kcf_dual_req_t *next_arg;
        next_arg = (kcf_dual_req_t *)areq->an_reqarg.cr_callback_arg;
        next_arg->kr_areq = areq;
        KCF_AREQ_REFHOLD(areq);
        areq->an_isdual = B_FALSE;

        NOTIFY_CLIENT(areq, error);
        return;
    }

    /*
     * If the CRYPTO_NOTIFY_OPDONE flag is set, we should always
     * notify. If this flag is clear, we skip the notification
     * provided there are no errors. We check this flag only for
     * init or update operations. It is ignored for single, final or
     * atomic operations.
     */
    skip_notify = (IS_UPDATE_OP(optype) || IS_INIT_OP(optype)) &&
        (!(areq->an_reqarg.cr_flag & CRYPTO_NOTIFY_OPDONE)) &&
        (error == CRYPTO_SUCCESS);

    if (!skip_notify) {
        NOTIFY_CLIENT(areq, error);
    }

    if (!(areq->an_reqarg.cr_flag & CRYPTO_SKIP_REQID))
        kcf_reqid_delete(areq);

    KCF_AREQ_REFRELE(areq);
}
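
/*
 * The an_turn_cv handoff above is the counterpart of the waiters in
 * process_req_hwp() and kcf_svc_do_run(): when request N for a context
 * completes, kcf_aop_done() marks request N+1 with an_is_my_turn and
 * signals it, preserving submission order for multi-part operations.
 */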

/*
 * Allocate the thread pool and initialize all the fields.
 */
static void
kcfpool_alloc()
{
    kcfpool = kmem_alloc(sizeof (kcf_pool_t), KM_SLEEP);

    kcfpool->kp_threads = kcfpool->kp_idlethreads = 0;
    kcfpool->kp_blockedthreads = 0;
    kcfpool->kp_signal_create_thread = B_FALSE;
    kcfpool->kp_nthrs = 0;
    kcfpool->kp_user_waiting = B_FALSE;

    mutex_init(&kcfpool->kp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&kcfpool->kp_nothr_cv, NULL, CV_DEFAULT, NULL);

    mutex_init(&kcfpool->kp_user_lock, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&kcfpool->kp_user_cv, NULL, CV_DEFAULT, NULL);

    kcf_idlethr_timeout = KCF_DEFAULT_THRTIMEOUT;
}

/*
 * This function is run by the 'creator' thread in the pool.
 * It is called from ioctl(CRYPTO_POOL_WAIT, ...).
 */
int
kcf_svc_wait(int *nthrs)
{
    clock_t rv;
    clock_t timeout_val = drv_usectohz(kcf_idlethr_timeout);

    if (kcfpool == NULL)
        return (ENOENT);

    mutex_enter(&kcfpool->kp_user_lock);
    /* Check if there's already a user thread waiting on this kcfpool */
    if (kcfpool->kp_user_waiting) {
        mutex_exit(&kcfpool->kp_user_lock);
        *nthrs = 0;
        return (EBUSY);
    }

    kcfpool->kp_user_waiting = B_TRUE;

    /* Go to sleep, waiting for the signaled flag. */
    while (!kcfpool->kp_signal_create_thread) {
        rv = cv_reltimedwait_sig(&kcfpool->kp_user_cv,
            &kcfpool->kp_user_lock, timeout_val, TR_CLOCK_TICK);
        switch (rv) {
        case 0:
            /* Interrupted, return to handle exit or signal */
            kcfpool->kp_user_waiting = B_FALSE;
            kcfpool->kp_signal_create_thread = B_FALSE;
            mutex_exit(&kcfpool->kp_user_lock);
            /*
             * kcfd is exiting. Release the door and
             * invalidate it.
             */
            mutex_enter(&kcf_dh_lock);
            if (kcf_dh != NULL) {
                door_ki_rele(kcf_dh);
                kcf_dh = NULL;
            }
            mutex_exit(&kcf_dh_lock);
            return (EINTR);

        case -1:
            /* Timed out. Recalculate the min/max threads */
            compute_min_max_threads();
            break;

        default:
            /* Worker thread did a cv_signal() */
            break;
        }
    }

    kcfpool->kp_signal_create_thread = B_FALSE;
    kcfpool->kp_user_waiting = B_FALSE;

    *nthrs = kcfpool->kp_nthrs;
    mutex_exit(&kcfpool->kp_user_lock);

    /* Return to userland for possible thread creation. */
    return (0);
}
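
/*
 * Hypothetical sketch of the kcfd daemon side of this handshake (user
 * level, names illustrative): one LWP parks in CRYPTO_POOL_WAIT and
 * spawns workers that each enter CRYPTO_POOL_RUN, i.e. kcf_svc_do_run().
 *
 *	int nthrs;
 *
 *	while (ioctl(fd, CRYPTO_POOL_WAIT, &nthrs) == 0) {
 *		while (nthrs-- > 0)
 *			(void) thr_create(NULL, 0, pool_run_thr, NULL,
 *			    THR_DETACHED, NULL);
 *	}
 *
 * where pool_run_thr() just issues ioctl(fd, CRYPTO_POOL_RUN, ...) and
 * exits when that returns.
 */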

/*
 * This routine introduces a locking order for gswq->gs_lock followed
 * by cpu_lock.
 * This means that no consumer of the k-api should hold cpu_lock when calling
 * k-api routines.
 */
static void
compute_min_max_threads()
{
    mutex_enter(&gswq->gs_lock);
    mutex_enter(&cpu_lock);
    kcf_minthreads = curthread->t_cpupart->cp_ncpus;
    mutex_exit(&cpu_lock);
    kcf_maxthreads = kcf_thr_multiple * kcf_minthreads;
    gswq->gs_maxjobs = kcf_maxthreads * crypto_taskq_maxalloc;
    mutex_exit(&gswq->gs_lock);
}

/*
 * This is the main routine of the failover kernel thread.
 * If there are any threads in the pool we sleep. The last thread in the
 * pool to exit will signal us to get to work. We get back to sleep
 * once we detect that the pool has threads.
 *
 * Note that in the hand-off from us to a pool thread we get to run once.
 * Since this hand-off is a rare event this should be fine.
 */
static void
kcf_failover_thread()
{
    int error = 0;
    kcf_context_t *ictx;
    kcf_areq_node_t *req;
    callb_cpr_t cpr_info;
    kmutex_t cpr_lock;
    static boolean_t is_logged = B_FALSE;

    mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
    CALLB_CPR_INIT(&cpr_info, &cpr_lock, callb_generic_cpr,
        "kcf_failover_thread");

    for (;;) {
        /*
         * Wait if there are any threads in the pool.
         */
        if (kcfpool->kp_threads > 0) {
            mutex_enter(&cpr_lock);
            CALLB_CPR_SAFE_BEGIN(&cpr_info);
            mutex_exit(&cpr_lock);

            mutex_enter(&kcfpool->kp_thread_lock);
            cv_wait(&kcfpool->kp_nothr_cv,
                &kcfpool->kp_thread_lock);
            mutex_exit(&kcfpool->kp_thread_lock);

            mutex_enter(&cpr_lock);
            CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
            mutex_exit(&cpr_lock);
            is_logged = B_FALSE;
        }

        /*
         * Get the requests from the queue and wait if needed.
         */
        mutex_enter(&gswq->gs_lock);

        while ((req = kcf_dequeue()) == NULL) {
            mutex_enter(&cpr_lock);
            CALLB_CPR_SAFE_BEGIN(&cpr_info);
            mutex_exit(&cpr_lock);

            KCF_ATOMIC_INCR(kcfpool->kp_idlethreads);
            cv_wait(&gswq->gs_cv, &gswq->gs_lock);
            KCF_ATOMIC_DECR(kcfpool->kp_idlethreads);

            mutex_enter(&cpr_lock);
            CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
            mutex_exit(&cpr_lock);
        }

        mutex_exit(&gswq->gs_lock);

        /*
         * We check the kp_threads since kcfd could have started
         * while we are waiting on the global software queue.
         */
        if (kcfpool->kp_threads <= 0 && !is_logged) {
            cmn_err(CE_WARN, "kcfd is not running. Please check "
                "and restart kcfd. Using the failover kernel "
                "thread for now.\n");
            is_logged = B_TRUE;
        }

        /*
         * Get to work on the request.
         */
        ictx = req->an_context;
        mutex_enter(&req->an_lock);
        req->an_state = REQ_INPROGRESS;
        mutex_exit(&req->an_lock);

        error = common_submit_request(req->an_provider, ictx ?
            &ictx->kc_glbl_ctx : NULL, &req->an_params, req);

        kcf_aop_done(req, error);
    }
}

/*
 * Insert the async request in the hash table after assigning it
 * an ID. Returns the ID.
 *
 * The ID is used by the caller to pass as an argument to a
 * cancel_req() routine later.
 */
static crypto_req_id_t
kcf_reqid_insert(kcf_areq_node_t *areq)
{
    int indx;
    crypto_req_id_t id;
    kcf_areq_node_t *headp;
    kcf_reqid_table_t *rt =
        kcf_reqid_table[CPU->cpu_seqid & REQID_TABLE_MASK];

    mutex_enter(&rt->rt_lock);

    rt->rt_curid = id =
        (rt->rt_curid - REQID_COUNTER_LOW) | REQID_COUNTER_HIGH;
    SET_REQID(areq, id);
    indx = REQID_HASH(id);
    headp = areq->an_idnext = rt->rt_idhash[indx];
    areq->an_idprev = NULL;
    if (headp != NULL)
        headp->an_idprev = areq;

    rt->rt_idhash[indx] = areq;
    mutex_exit(&rt->rt_lock);

    return (id);
}
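
/*
 * Layout assumption for the IDs minted above: rt_curid is seeded with
 * the table index in kcf_sched_init(), and REQID_COUNTER_LOW /
 * REQID_COUNTER_HIGH are presumed to step only the counter bits above
 * REQID_TABLE_MASK. That keeps the table index recoverable from a bare
 * ID, which is what kcf_reqid_delete() and crypto_cancel_req() rely on
 * when they look up kcf_reqid_table[id & REQID_TABLE_MASK].
 */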

/*
 * Delete the async request from the hash table.
 */
static void
kcf_reqid_delete(kcf_areq_node_t *areq)
{
    int indx;
    kcf_areq_node_t *nextp, *prevp;
    crypto_req_id_t id = GET_REQID(areq);
    kcf_reqid_table_t *rt;

    rt = kcf_reqid_table[id & REQID_TABLE_MASK];
    indx = REQID_HASH(id);

    mutex_enter(&rt->rt_lock);

    nextp = areq->an_idnext;
    prevp = areq->an_idprev;
    if (nextp != NULL)
        nextp->an_idprev = prevp;
    if (prevp != NULL)
        prevp->an_idnext = nextp;
    else
        rt->rt_idhash[indx] = nextp;

    SET_REQID(areq, 0);
    cv_broadcast(&areq->an_done);

    mutex_exit(&rt->rt_lock);
}

/*
 * Cancel a single asynchronous request.
 *
 * We guarantee that no problems will result from calling
 * crypto_cancel_req() for a request which is either running, or
 * has already completed. We remove the request from any queues
 * if it is possible. We wait for request completion if the
 * request is dispatched to a provider.
 *
 * Calling context:
 *	Can be called from user context only.
 *
 * NOTE: We acquire the following locks in this routine (in order):
 *	- rt_lock (kcf_reqid_table_t)
 *	- gswq->gs_lock
 *	- areq->an_lock
 *	- ictx->kc_in_use_lock (from kcf_removereq_in_ctxchain())
 *
 * This locking order MUST be maintained in code everywhere else.
 */
void
crypto_cancel_req(crypto_req_id_t id)
{
    int indx;
    kcf_areq_node_t *areq;
    kcf_provider_desc_t *pd;
    kcf_context_t *ictx;
    kcf_reqid_table_t *rt;

    rt = kcf_reqid_table[id & REQID_TABLE_MASK];
    indx = REQID_HASH(id);

    mutex_enter(&rt->rt_lock);
    for (areq = rt->rt_idhash[indx]; areq; areq = areq->an_idnext) {
        if (GET_REQID(areq) == id) {
            /*
             * We found the request. It is either still waiting
             * in the framework queues or running at the provider.
             */
            pd = areq->an_provider;
            ASSERT(pd != NULL);

            switch (pd->pd_prov_type) {
            case CRYPTO_SW_PROVIDER:
                mutex_enter(&gswq->gs_lock);
                mutex_enter(&areq->an_lock);

                /* This request can be safely canceled. */
                if (areq->an_state <= REQ_WAITING) {
                    /* Remove from gswq, the global software queue. */
                    kcf_remove_node(areq);
                    if ((ictx = areq->an_context) != NULL)
                        kcf_removereq_in_ctxchain(
                            ictx, areq);

                    mutex_exit(&areq->an_lock);
                    mutex_exit(&gswq->gs_lock);
                    mutex_exit(&rt->rt_lock);

                    /*
                     * Remove areq from the hash table
                     * and free it.
                     */
                    kcf_reqid_delete(areq);
                    KCF_AREQ_REFRELE(areq);
                    return;
                }

                mutex_exit(&areq->an_lock);
                mutex_exit(&gswq->gs_lock);
                break;

            case CRYPTO_HW_PROVIDER:
                /*
                 * There is no interface to remove an entry
                 * once it is on the taskq. So, we do not do
                 * anything for a hardware provider.
                 */
                break;
            }

            /*
             * The request is running. Wait for the request
             * completion to notify us.
             */
            KCF_AREQ_REFHOLD(areq);
            while (GET_REQID(areq) == id)
                cv_wait(&areq->an_done, &rt->rt_lock);
            KCF_AREQ_REFRELE(areq);
            break;
        }
    }

    mutex_exit(&rt->rt_lock);
}
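
/*
 * Illustrative use (hypothetical consumer code): the ID passed to
 * crypto_cancel_req() is the one stored into the caller's
 * crypto_call_req_t by kcf_submit_request() when CRYPTO_SKIP_REQID was
 * not set:
 *
 *	crypto_call_req_t cr;
 *	...
 *	if (crypto_mac(&mech, &data, &key, tmpl, &mac, &cr) ==
 *	    CRYPTO_QUEUED)
 *		crypto_cancel_req(cr.cr_reqid);
 */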

/*
 * Cancel all asynchronous requests associated with the
 * passed in crypto context and free it.
 *
 * A client SHOULD NOT call this routine after calling a crypto_*_final
 * routine. This routine is called only during intermediate operations.
 * The client should not use the crypto context after this function returns
 * since we destroy it.
 *
 * Calling context:
 *	Can be called from user context only.
 */
void
crypto_cancel_ctx(crypto_context_t ctx)
{
    kcf_context_t *ictx;
    kcf_areq_node_t *areq;

    if (ctx == NULL)
        return;

    ictx = (kcf_context_t *)((crypto_ctx_t *)ctx)->cc_framework_private;

    mutex_enter(&ictx->kc_in_use_lock);

    /* Walk the chain and cancel each request */
    while ((areq = ictx->kc_req_chain_first) != NULL) {
        /*
         * We have to drop the lock here as we may have
         * to wait for request completion. We hold the
         * request before dropping the lock though, so that it
         * won't be freed underneath us.
         */
        KCF_AREQ_REFHOLD(areq);
        mutex_exit(&ictx->kc_in_use_lock);

        crypto_cancel_req(GET_REQID(areq));
        KCF_AREQ_REFRELE(areq);

        mutex_enter(&ictx->kc_in_use_lock);
    }

    mutex_exit(&ictx->kc_in_use_lock);
    KCF_CONTEXT_REFRELE(ictx);
}

/*
 * Update kstats.
 */
static int
kcf_misc_kstat_update(kstat_t *ksp, int rw)
{
    uint_t tcnt;
    kcf_stats_t *ks_data;

    if (rw == KSTAT_WRITE)
        return (EACCES);

    ks_data = ksp->ks_data;

    ks_data->ks_thrs_in_pool.value.ui32 = kcfpool->kp_threads;
    /*
     * The failover thread is counted in kp_idlethreads in
     * some corner cases. This is done to avoid doing more checks
     * when submitting a request. We account for those cases below.
     */
    if ((tcnt = kcfpool->kp_idlethreads) == (kcfpool->kp_threads + 1))
        tcnt--;
    ks_data->ks_idle_thrs.value.ui32 = tcnt;
    ks_data->ks_minthrs.value.ui32 = kcf_minthreads;
    ks_data->ks_maxthrs.value.ui32 = kcf_maxthreads;
    ks_data->ks_swq_njobs.value.ui32 = gswq->gs_njobs;
    ks_data->ks_swq_maxjobs.value.ui32 = gswq->gs_maxjobs;
    ks_data->ks_taskq_threads.value.ui32 = crypto_taskq_threads;
    ks_data->ks_taskq_minalloc.value.ui32 = crypto_taskq_minalloc;
    ks_data->ks_taskq_maxalloc.value.ui32 = crypto_taskq_maxalloc;

    return (0);
}
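
/*
 * The counters above are exported through the "kcf" kstat created in
 * kcf_sched_init() and should be observable from user level with, for
 * example (assuming the standard kstat(1M) utility):
 *
 *	$ kstat -p kcf:0:framework_stats
 */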

/*
 * Allocate and initialize a kcf_dual_req, used for saving the arguments of
 * a dual operation or an atomic operation that has to be internally
 * simulated with multiple single steps.
 * crq determines the memory allocation flags.
 */

kcf_dual_req_t *
kcf_alloc_req(crypto_call_req_t *crq)
{
    kcf_dual_req_t *kcr;

    kcr = kmem_alloc(sizeof (kcf_dual_req_t), KCF_KMFLAG(crq));

    if (kcr == NULL)
        return (NULL);

    /* Copy the whole crypto_call_req struct, as it isn't persistent */
    if (crq != NULL)
        kcr->kr_callreq = *crq;
    else
        bzero(&(kcr->kr_callreq), sizeof (crypto_call_req_t));
    kcr->kr_areq = NULL;
    kcr->kr_saveoffset = 0;
    kcr->kr_savelen = 0;

    return (kcr);
}

/*
 * Callback routine for the next part of a simulated dual part.
 * Schedules the next step.
 *
 * This routine can be called from interrupt context.
 */
void
kcf_next_req(void *next_req_arg, int status)
{
    kcf_dual_req_t *next_req = (kcf_dual_req_t *)next_req_arg;
    kcf_req_params_t *params = &(next_req->kr_params);
    kcf_areq_node_t *areq = next_req->kr_areq;
    int error = status;
    kcf_provider_desc_t *pd;
    crypto_dual_data_t *ct;

    /* Stop the processing if an error occurred at this step */
    if (error != CRYPTO_SUCCESS) {
out:
        areq->an_reqarg = next_req->kr_callreq;
        KCF_AREQ_REFRELE(areq);
        kmem_free(next_req, sizeof (kcf_dual_req_t));
        areq->an_isdual = B_FALSE;
        kcf_aop_done(areq, error);
        return;
    }

    switch (params->rp_opgrp) {
    case KCF_OG_MAC: {

        /*
         * The next req is submitted with the same reqid as the
         * first part. The consumer only got back that reqid, and
         * should still be able to cancel the operation during its
         * second step.
         */
        kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
        crypto_ctx_template_t mac_tmpl;
        kcf_mech_entry_t *me;

        ct = (crypto_dual_data_t *)mops->mo_data;
        mac_tmpl = (crypto_ctx_template_t)mops->mo_templ;

        /* No expected recoverable failures, so no retry list */
        pd = kcf_get_mech_provider(mops->mo_framework_mechtype, NULL,
            &me, &error, NULL, CRYPTO_FG_MAC_ATOMIC,
            (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len2);

        if (pd == NULL) {
            error = CRYPTO_MECH_NOT_SUPPORTED;
            goto out;
        }
        /* Validate the MAC context template here */
        if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
            (mac_tmpl != NULL)) {
            kcf_ctx_template_t *ctx_mac_tmpl;

            ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;

            if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
                KCF_PROV_REFRELE(pd);
                error = CRYPTO_OLD_CTX_TEMPLATE;
                goto out;
            }
            mops->mo_templ = ctx_mac_tmpl->ct_prov_tmpl;
        }

        break;
    }
    case KCF_OG_DECRYPT: {
        kcf_decrypt_ops_params_t *dcrops =
            &(params->rp_u.decrypt_params);

        ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
        /* No expected recoverable failures, so no retry list */
        pd = kcf_get_mech_provider(dcrops->dop_framework_mechtype,
            NULL, NULL, &error, NULL, CRYPTO_FG_DECRYPT_ATOMIC,
            (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len1);

        if (pd == NULL) {
            error = CRYPTO_MECH_NOT_SUPPORTED;
            goto out;
        }
        break;
    }
    }

    /* The second step uses len2 and offset2 of the dual_data */
    next_req->kr_saveoffset = ct->dd_offset1;
    next_req->kr_savelen = ct->dd_len1;
    ct->dd_offset1 = ct->dd_offset2;
    ct->dd_len1 = ct->dd_len2;

    /* preserve if the caller is restricted */
    if (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED) {
        areq->an_reqarg.cr_flag = CRYPTO_RESTRICTED;
    } else {
        areq->an_reqarg.cr_flag = 0;
    }

    areq->an_reqarg.cr_callback_func = kcf_last_req;
    areq->an_reqarg.cr_callback_arg = next_req;
    areq->an_isdual = B_TRUE;

    /*
     * We would like to call kcf_submit_request() here. But, that
     * is not possible as that routine allocates a new kcf_areq_node_t
     * request structure, while we need to reuse the existing request
     * structure.
     */
    switch (pd->pd_prov_type) {
    case CRYPTO_SW_PROVIDER:
        error = common_submit_request(pd, NULL, params,
            KCF_RHNDL(KM_NOSLEEP));
        break;

    case CRYPTO_HW_PROVIDER: {
        kcf_provider_desc_t *old_pd;
        taskq_t *taskq = pd->pd_taskq;

        /*
         * Set the params for the second step in the
         * dual-ops.
         */
        areq->an_params = *params;
        old_pd = areq->an_provider;
        KCF_PROV_REFRELE(old_pd);
        KCF_PROV_REFHOLD(pd);
        areq->an_provider = pd;

        /*
         * Note that we have to do a taskq_dispatch()
         * here as we may be in interrupt context.
         */
        if (taskq_dispatch(taskq, process_req_hwp, areq,
            TQ_NOSLEEP) == (taskqid_t)0) {
            error = CRYPTO_HOST_MEMORY;
        } else {
            error = CRYPTO_QUEUED;
        }
        break;
    }
    }

    /*
     * We have to release the holds on the request and the provider
     * in all cases.
     */
    KCF_AREQ_REFRELE(areq);
    KCF_PROV_REFRELE(pd);

    if (error != CRYPTO_QUEUED) {
        /* restore, clean up, and invoke the client's callback */

        ct->dd_offset1 = next_req->kr_saveoffset;
        ct->dd_len1 = next_req->kr_savelen;
        areq->an_reqarg = next_req->kr_callreq;
        kmem_free(next_req, sizeof (kcf_dual_req_t));
        areq->an_isdual = B_FALSE;
        kcf_aop_done(areq, error);
    }
}

/*
 * Last part of an emulated dual operation.
 * Clean up and restore ...
 */
void
kcf_last_req(void *last_req_arg, int status)
{
    kcf_dual_req_t *last_req = (kcf_dual_req_t *)last_req_arg;

    kcf_req_params_t *params = &(last_req->kr_params);
    kcf_areq_node_t *areq = last_req->kr_areq;
    crypto_dual_data_t *ct;

    switch (params->rp_opgrp) {
    case KCF_OG_MAC: {
        kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);

        ct = (crypto_dual_data_t *)mops->mo_data;
        break;
    }
    case KCF_OG_DECRYPT: {
        kcf_decrypt_ops_params_t *dcrops =
            &(params->rp_u.decrypt_params);

        ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
        break;
    }
    }
    ct->dd_offset1 = last_req->kr_saveoffset;
    ct->dd_len1 = last_req->kr_savelen;

    /* The submitter used kcf_last_req as its callback */

    if (areq == NULL) {
        crypto_call_req_t *cr = &last_req->kr_callreq;

        (*(cr->cr_callback_func))(cr->cr_callback_arg, status);
        kmem_free(last_req, sizeof (kcf_dual_req_t));
        return;
    }
    areq->an_reqarg = last_req->kr_callreq;
    KCF_AREQ_REFRELE(areq);
    kmem_free(last_req, sizeof (kcf_dual_req_t));
    areq->an_isdual = B_FALSE;
    kcf_aop_done(areq, status);
}