xref: /illumos-gate/usr/src/uts/common/crypto/core/kcf_sched.c (revision ca9327a6de44d69ddab3668cc1e143ce781387a3)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * This file contains the core framework routines for the
30  * kernel cryptographic framework. These routines are at the
31  * middle layer, between the kernel API/ioctls and the SPI.
32  */
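/*
 * A rough, illustrative sketch of the request flow implemented below
 * (not authoritative):
 *
 *	consumer (k-api or ioctl)
 *	  -> kcf_submit_request()
 *		synchronous (crq == NULL):
 *		    SW provider -> common_submit_request() directly
 *		    HW provider -> process_req_hwp() via the provider
 *				   taskq, then cv_wait() for kcf_sop_done()
 *		asynchronous (crq != NULL):
 *		    SW provider -> kcf_disp_sw_request() -> gswq, serviced
 *				   by the kcf_svc_do_run() pool threads
 *		    HW provider -> taskq_dispatch(process_req_hwp)
 *	  -> kcf_sop_done()/kcf_aop_done() notify the client; recoverable
 *	     failures may be retried through kcf_resubmit_request().
 */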
33 
34 #include <sys/types.h>
35 #include <sys/errno.h>
36 #include <sys/kmem.h>
37 #include <sys/proc.h>
38 #include <sys/cpuvar.h>
39 #include <sys/cpupart.h>
40 #include <sys/ksynch.h>
41 #include <sys/callb.h>
42 #include <sys/cmn_err.h>
43 #include <sys/systm.h>
44 #include <sys/sysmacros.h>
45 #include <sys/kstat.h>
46 #include <sys/crypto/common.h>
47 #include <sys/crypto/impl.h>
48 #include <sys/crypto/sched_impl.h>
49 #include <sys/crypto/api.h>
50 #include <sys/crypto/spi.h>
51 #include <sys/taskq_impl.h>
52 #include <sys/ddi.h>
53 #include <sys/sunddi.h>
54 
55 
56 kcf_global_swq_t *gswq;	/* Global software queue */
57 
58 /* Thread pool related variables */
59 static kcf_pool_t *kcfpool;	/* Thread pool of kcfd LWPs */
60 int kcf_maxthreads;
61 int kcf_minthreads;
62 int kcf_thr_multiple = 2;	/* Boot-time tunable for experimentation */
63 static ulong_t	kcf_idlethr_timeout;
64 static boolean_t kcf_sched_running = B_FALSE;
65 #define	KCF_DEFAULT_THRTIMEOUT	60000000	/* 60 seconds */
66 
67 /* kmem caches used by the scheduler */
68 static struct kmem_cache *kcf_sreq_cache;
69 static struct kmem_cache *kcf_areq_cache;
70 static struct kmem_cache *kcf_context_cache;
71 
72 /* Global request ID table */
73 static kcf_reqid_table_t *kcf_reqid_table[REQID_TABLES];
74 
75 /* KCF stats. Not protected. */
76 static kcf_stats_t kcf_ksdata = {
77 	{ "total threads in pool",	KSTAT_DATA_UINT32},
78 	{ "idle threads in pool",	KSTAT_DATA_UINT32},
79 	{ "min threads in pool",	KSTAT_DATA_UINT32},
80 	{ "max threads in pool",	KSTAT_DATA_UINT32},
81 	{ "requests in gswq",		KSTAT_DATA_UINT32},
82 	{ "max requests in gswq",	KSTAT_DATA_UINT32},
83 	{ "threads for HW taskq",	KSTAT_DATA_UINT32},
84 	{ "minalloc for HW taskq",	KSTAT_DATA_UINT32},
85 	{ "maxalloc for HW taskq",	KSTAT_DATA_UINT32}
86 };
87 
88 static kstat_t *kcf_misc_kstat = NULL;
89 ulong_t kcf_swprov_hndl = 0;
90 
91 static kcf_areq_node_t *kcf_areqnode_alloc(kcf_provider_desc_t *,
92     kcf_context_t *, crypto_call_req_t *, kcf_req_params_t *, boolean_t);
93 static int kcf_disp_sw_request(kcf_areq_node_t *);
94 static void process_req_hwp(void *);
95 static kcf_areq_node_t	*kcf_dequeue();
96 static int kcf_enqueue(kcf_areq_node_t *);
97 static void kcf_failover_thread();
98 static void kcfpool_alloc();
99 static void kcf_reqid_delete(kcf_areq_node_t *areq);
100 static crypto_req_id_t kcf_reqid_insert(kcf_areq_node_t *areq);
101 static int kcf_misc_kstat_update(kstat_t *ksp, int rw);
102 static void compute_min_max_threads();
103 
104 
105 /*
106  * Create a new context.
107  */
108 crypto_ctx_t *
109 kcf_new_ctx(crypto_call_req_t *crq, kcf_provider_desc_t *pd,
110     crypto_session_id_t sid)
111 {
112 	crypto_ctx_t *ctx;
113 	kcf_context_t *kcf_ctx;
114 
115 	kcf_ctx = kmem_cache_alloc(kcf_context_cache,
116 	    (crq == NULL) ? KM_SLEEP : KM_NOSLEEP);
117 	if (kcf_ctx == NULL)
118 		return (NULL);
119 
120 	/* initialize the context for the consumer */
121 	kcf_ctx->kc_refcnt = 1;
122 	kcf_ctx->kc_req_chain_first = NULL;
123 	kcf_ctx->kc_req_chain_last = NULL;
124 	kcf_ctx->kc_secondctx = NULL;
125 	KCF_PROV_REFHOLD(pd);
126 	kcf_ctx->kc_prov_desc = pd;
127 	kcf_ctx->kc_sw_prov_desc = NULL;
128 	kcf_ctx->kc_mech = NULL;
129 
130 	ctx = &kcf_ctx->kc_glbl_ctx;
131 	ctx->cc_provider = pd->pd_prov_handle;
132 	ctx->cc_session = sid;
133 	ctx->cc_provider_private = NULL;
134 	ctx->cc_framework_private = (void *)kcf_ctx;
135 	ctx->cc_flags = 0;
136 	ctx->cc_opstate = NULL;
137 
138 	return (ctx);
139 }
140 
141 /*
142  * Allocate a new async request node.
143  *
144  * ictx - Framework private context pointer
145  * crq - Has the callback function and argument. Should be non-NULL.
146  * req - The parameters to pass to the SPI
147  */
148 static kcf_areq_node_t *
149 kcf_areqnode_alloc(kcf_provider_desc_t *pd, kcf_context_t *ictx,
150     crypto_call_req_t *crq, kcf_req_params_t *req, boolean_t isdual)
151 {
152 	kcf_areq_node_t	*arptr, *areq;
153 
154 	ASSERT(crq != NULL);
155 	arptr = kmem_cache_alloc(kcf_areq_cache, KM_NOSLEEP);
156 	if (arptr == NULL)
157 		return (NULL);
158 
159 	arptr->an_state = REQ_ALLOCATED;
160 	arptr->an_reqarg = *crq;
161 	arptr->an_params = *req;
162 	arptr->an_context = ictx;
163 	arptr->an_isdual = isdual;
164 
165 	arptr->an_next = arptr->an_prev = NULL;
166 	KCF_PROV_REFHOLD(pd);
167 	arptr->an_provider = pd;
168 	arptr->an_tried_plist = NULL;
169 	arptr->an_refcnt = 1;
170 	arptr->an_idnext = arptr->an_idprev = NULL;
171 
172 	/*
173 	 * Requests for context-less operations do not use the
174 	 * fields an_is_my_turn and an_ctxchain_next.
175 	 */
176 	if (ictx == NULL)
177 		return (arptr);
178 
179 	KCF_CONTEXT_REFHOLD(ictx);
180 	/*
181 	 * Chain this request to the context.
182 	 */
183 	mutex_enter(&ictx->kc_in_use_lock);
184 	arptr->an_ctxchain_next = NULL;
185 	if ((areq = ictx->kc_req_chain_last) == NULL) {
186 		arptr->an_is_my_turn = B_TRUE;
187 		ictx->kc_req_chain_last =
188 		    ictx->kc_req_chain_first = arptr;
189 	} else {
190 		ASSERT(ictx->kc_req_chain_first != NULL);
191 		arptr->an_is_my_turn = B_FALSE;
192 		/* Insert the new request to the end of the chain. */
193 		areq->an_ctxchain_next = arptr;
194 		ictx->kc_req_chain_last = arptr;
195 	}
196 	mutex_exit(&ictx->kc_in_use_lock);
197 
198 	return (arptr);
199 }
200 
201 /*
202  * Queue the request node and do one of the following:
203  *	- If there is an idle thread signal it to run.
204  *	- If there is no idle thread and max running threads is not
205  *	  reached, signal the creator thread for more threads.
206  *
207  * If the two conditions above are not met, we don't need to do
208  * any thing. The request will be picked up by one of the
209  * worker threads when it becomes available.
210  */
211 static int
212 kcf_disp_sw_request(kcf_areq_node_t *areq)
213 {
214 	int err;
215 	int cnt = 0;
216 
217 	if ((err = kcf_enqueue(areq)) != 0)
218 		return (err);
219 
220 	if (kcfpool->kp_idlethreads > 0) {
221 		/* Signal an idle thread to run */
222 		mutex_enter(&gswq->gs_lock);
223 		cv_signal(&gswq->gs_cv);
224 		mutex_exit(&gswq->gs_lock);
225 
226 		return (CRYPTO_QUEUED);
227 	}
228 
229 	/*
230 	 * We keep the number of running threads at
231 	 * kcf_minthreads to reduce gs_lock contention.
232 	 */
233 	cnt = kcf_minthreads -
234 	    (kcfpool->kp_threads - kcfpool->kp_blockedthreads);
235 	if (cnt > 0) {
236 		/*
237 		 * The following ensures the number of threads in pool
238 		 * does not exceed kcf_maxthreads.
239 		 */
240 		cnt = min(cnt, kcf_maxthreads - kcfpool->kp_threads);
241 		if (cnt > 0) {
242 			/* Signal the creator thread for more threads */
243 			mutex_enter(&kcfpool->kp_user_lock);
244 			if (!kcfpool->kp_signal_create_thread) {
245 				kcfpool->kp_signal_create_thread = B_TRUE;
246 				kcfpool->kp_nthrs = cnt;
247 				cv_signal(&kcfpool->kp_user_cv);
248 			}
249 			mutex_exit(&kcfpool->kp_user_lock);
250 		}
251 	}
252 
253 	return (CRYPTO_QUEUED);
254 }
255 
256 /*
257  * This routine is called by the taskq associated with
258  * each hardware provider. We notify the kernel consumer
259  * via the callback routine in case of CRYPTO_SUCCESS or
260  * a failure.
261  *
262  * A request can be of type kcf_areq_node_t or of type
263  * kcf_sreq_node_t.
264  */
265 static void
266 process_req_hwp(void *ireq)
267 {
268 	int error = 0;
269 	crypto_ctx_t *ctx;
270 	kcf_call_type_t ctype;
271 	kcf_provider_desc_t *pd;
272 	kcf_areq_node_t *areq = (kcf_areq_node_t *)ireq;
273 	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)ireq;
274 
275 	pd = ((ctype = GET_REQ_TYPE(ireq)) == CRYPTO_SYNCH) ?
276 	    sreq->sn_provider : areq->an_provider;
277 
278 	/*
279 	 * Wait if flow control is in effect for the provider. A
280 	 * CRYPTO_PROVIDER_READY or CRYPTO_PROVIDER_FAILED
281 	 * notification will signal us. We also get signaled if
282 	 * the provider is unregistering.
283 	 */
284 	if (pd->pd_state == KCF_PROV_BUSY) {
285 		mutex_enter(&pd->pd_lock);
286 		while (pd->pd_state == KCF_PROV_BUSY)
287 			cv_wait(&pd->pd_resume_cv, &pd->pd_lock);
288 		mutex_exit(&pd->pd_lock);
289 	}
290 
291 	/*
292 	 * Bump the internal reference count while the request is being
293 	 * processed. This is how we know when it's safe to unregister
294 	 * a provider. This step must precede the pd_state check below.
295 	 */
296 	KCF_PROV_IREFHOLD(pd);
297 
298 	/*
299 	 * Fail the request if the provider has failed. We return a
300 	 * recoverable error and the notified clients attempt any
301 	 * recovery. For async clients this is done in kcf_aop_done()
302 	 * and for sync clients it is done in the k-api routines.
303 	 */
304 	if (pd->pd_state >= KCF_PROV_FAILED) {
305 		error = CRYPTO_DEVICE_ERROR;
306 		goto bail;
307 	}
308 
309 	if (ctype == CRYPTO_SYNCH) {
310 		mutex_enter(&sreq->sn_lock);
311 		sreq->sn_state = REQ_INPROGRESS;
312 		mutex_exit(&sreq->sn_lock);
313 
314 		ctx = sreq->sn_context ? &sreq->sn_context->kc_glbl_ctx : NULL;
315 		error = common_submit_request(sreq->sn_provider, ctx,
316 		    sreq->sn_params, sreq);
317 	} else {
318 		kcf_context_t *ictx;
319 		ASSERT(ctype == CRYPTO_ASYNCH);
320 
321 		/*
322 		 * We are in the per-hardware provider thread context and
323 		 * hence can sleep. Note that the caller would have done
324 		 * a taskq_dispatch(..., TQ_NOSLEEP) and would have returned.
325 		 */
326 		ctx = (ictx = areq->an_context) ? &ictx->kc_glbl_ctx : NULL;
327 
328 		mutex_enter(&areq->an_lock);
329 		/*
330 		 * We need to maintain ordering for multi-part requests.
331 		 * an_is_my_turn is set to B_TRUE initially for a request
332 		 * when it is enqueued and there are no other requests
333 		 * for that context. It is set later from kcf_aop_done() when
334 		 * the request before us in the chain of requests for the
335 		 * context completes. We get signaled at that point.
336 		 */
337 		if (ictx != NULL) {
338 			ASSERT(ictx->kc_prov_desc == areq->an_provider);
339 
340 			while (areq->an_is_my_turn == B_FALSE) {
341 				cv_wait(&areq->an_turn_cv, &areq->an_lock);
342 			}
343 		}
344 		areq->an_state = REQ_INPROGRESS;
345 		mutex_exit(&areq->an_lock);
346 
347 		error = common_submit_request(areq->an_provider, ctx,
348 		    &areq->an_params, areq);
349 	}
350 
351 bail:
352 	if (error == CRYPTO_QUEUED) {
353 		/*
354 		 * The request is queued by the provider and we should
355 		 * get a crypto_op_notification() from the provider later.
356 		 * We notify the consumer at that time.
357 		 */
358 		return;
359 	} else {		/* CRYPTO_SUCCESS or other failure */
360 		KCF_PROV_IREFRELE(pd);
361 		if (ctype == CRYPTO_SYNCH)
362 			kcf_sop_done(sreq, error);
363 		else
364 			kcf_aop_done(areq, error);
365 	}
366 }
367 
368 /*
369  * This routine checks if a request can be retried on another
370  * provider. If true, mech1 is initialized to point to the mechanism
371  * structure. mech2 is also initialized in case of a dual operation. fg
372  * is initialized to the correct crypto_func_group_t bit flag. They are
373  * initialized by this routine, so that the caller can pass them to a
374  * kcf_get_mech_provider() or kcf_get_dual_provider() with no further change.
375  *
376  * We check that the request is for a init or atomic routine and that
377  * it is for one of the operation groups used from k-api .
378  */
379 static boolean_t
380 can_resubmit(kcf_areq_node_t *areq, crypto_mechanism_t **mech1,
381     crypto_mechanism_t **mech2, crypto_func_group_t *fg)
382 {
383 	kcf_req_params_t *params;
384 	kcf_op_type_t optype;
385 
386 	params = &areq->an_params;
387 	optype = params->rp_optype;
388 
389 	if (!(IS_INIT_OP(optype) || IS_ATOMIC_OP(optype)))
390 		return (B_FALSE);
391 
392 	switch (params->rp_opgrp) {
393 	case KCF_OG_DIGEST: {
394 		kcf_digest_ops_params_t *dops = &params->rp_u.digest_params;
395 
396 		dops->do_mech.cm_type = dops->do_framework_mechtype;
397 		*mech1 = &dops->do_mech;
398 		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DIGEST :
399 		    CRYPTO_FG_DIGEST_ATOMIC;
400 		break;
401 	}
402 
403 	case KCF_OG_MAC: {
404 		kcf_mac_ops_params_t *mops = &params->rp_u.mac_params;
405 
406 		mops->mo_mech.cm_type = mops->mo_framework_mechtype;
407 		*mech1 = &mops->mo_mech;
408 		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC :
409 		    CRYPTO_FG_MAC_ATOMIC;
410 		break;
411 	}
412 
413 	case KCF_OG_SIGN: {
414 		kcf_sign_ops_params_t *sops = &params->rp_u.sign_params;
415 
416 		sops->so_mech.cm_type = sops->so_framework_mechtype;
417 		*mech1 = &sops->so_mech;
418 		switch (optype) {
419 		case KCF_OP_INIT:
420 			*fg = CRYPTO_FG_SIGN;
421 			break;
422 		case KCF_OP_ATOMIC:
423 			*fg = CRYPTO_FG_SIGN_ATOMIC;
424 			break;
425 		default:
426 			ASSERT(optype == KCF_OP_SIGN_RECOVER_ATOMIC);
427 			*fg = CRYPTO_FG_SIGN_RECOVER_ATOMIC;
428 		}
429 		break;
430 	}
431 
432 	case KCF_OG_VERIFY: {
433 		kcf_verify_ops_params_t *vops = &params->rp_u.verify_params;
434 
435 		vops->vo_mech.cm_type = vops->vo_framework_mechtype;
436 		*mech1 = &vops->vo_mech;
437 		switch (optype) {
438 		case KCF_OP_INIT:
439 			*fg = CRYPTO_FG_VERIFY;
440 			break;
441 		case KCF_OP_ATOMIC:
442 			*fg = CRYPTO_FG_VERIFY_ATOMIC;
443 			break;
444 		default:
445 			ASSERT(optype == KCF_OP_VERIFY_RECOVER_ATOMIC);
446 			*fg = CRYPTO_FG_VERIFY_RECOVER_ATOMIC;
447 		}
448 		break;
449 	}
450 
451 	case KCF_OG_ENCRYPT: {
452 		kcf_encrypt_ops_params_t *eops = &params->rp_u.encrypt_params;
453 
454 		eops->eo_mech.cm_type = eops->eo_framework_mechtype;
455 		*mech1 = &eops->eo_mech;
456 		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT :
457 		    CRYPTO_FG_ENCRYPT_ATOMIC;
458 		break;
459 	}
460 
461 	case KCF_OG_DECRYPT: {
462 		kcf_decrypt_ops_params_t *dcrops = &params->rp_u.decrypt_params;
463 
464 		dcrops->dop_mech.cm_type = dcrops->dop_framework_mechtype;
465 		*mech1 = &dcrops->dop_mech;
466 		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DECRYPT :
467 		    CRYPTO_FG_DECRYPT_ATOMIC;
468 		break;
469 	}
470 
471 	case KCF_OG_ENCRYPT_MAC: {
472 		kcf_encrypt_mac_ops_params_t *eops =
473 		    &params->rp_u.encrypt_mac_params;
474 
475 		eops->em_encr_mech.cm_type = eops->em_framework_encr_mechtype;
476 		*mech1 = &eops->em_encr_mech;
477 		eops->em_mac_mech.cm_type = eops->em_framework_mac_mechtype;
478 		*mech2 = &eops->em_mac_mech;
479 		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT_MAC :
480 		    CRYPTO_FG_ENCRYPT_MAC_ATOMIC;
481 		break;
482 	}
483 
484 	case KCF_OG_MAC_DECRYPT: {
485 		kcf_mac_decrypt_ops_params_t *dops =
486 		    &params->rp_u.mac_decrypt_params;
487 
488 		dops->md_mac_mech.cm_type = dops->md_framework_mac_mechtype;
489 		*mech1 = &dops->md_mac_mech;
490 		dops->md_decr_mech.cm_type = dops->md_framework_decr_mechtype;
491 		*mech2 = &dops->md_decr_mech;
492 		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC_DECRYPT :
493 		    CRYPTO_FG_MAC_DECRYPT_ATOMIC;
494 		break;
495 	}
496 
497 	default:
498 		return (B_FALSE);
499 	}
500 
501 	return (B_TRUE);
502 }
503 
504 /*
505  * This routine is called when a request to a provider has failed
506  * with a recoverable error. This routine tries to find another provider
507  * and dispatches the request to the new provider, if one is available.
508  * We reuse the request structure.
509  *
510  * A return value of NULL from kcf_get_mech_provider() indicates
511  * we have tried the last provider.
512  */
513 static int
514 kcf_resubmit_request(kcf_areq_node_t *areq)
515 {
516 	int error = CRYPTO_FAILED;
517 	kcf_context_t *ictx;
518 	kcf_provider_desc_t *old_pd;
519 	kcf_provider_desc_t *new_pd;
520 	crypto_mechanism_t *mech1 = NULL, *mech2 = NULL;
521 	crypto_mech_type_t prov_mt1, prov_mt2;
522 	crypto_func_group_t fg;
523 
524 	if (!can_resubmit(areq, &mech1, &mech2, &fg))
525 		return (error);
526 
527 	old_pd = areq->an_provider;
528 	/*
529 	 * Add old_pd to the list of providers already tried. We release
530 	 * the hold on old_pd (from the earlier kcf_get_mech_provider()) in
531 	 * kcf_free_triedlist().
532 	 */
533 	if (kcf_insert_triedlist(&areq->an_tried_plist, old_pd,
534 	    KM_NOSLEEP) == NULL)
535 		return (error);
536 
537 	if (mech1 && !mech2) {
538 		new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, &error,
539 		    areq->an_tried_plist, fg,
540 		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
541 	} else {
542 		ASSERT(mech1 != NULL && mech2 != NULL);
543 
544 		new_pd = kcf_get_dual_provider(mech1, mech2, NULL, &prov_mt1,
545 		    &prov_mt2, &error, areq->an_tried_plist, fg, fg,
546 		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
547 	}
548 
549 	if (new_pd == NULL)
550 		return (error);
551 
552 	/*
553 	 * We reuse the old context by resetting provider specific
554 	 * fields in it.
555 	 */
556 	if ((ictx = areq->an_context) != NULL) {
557 		crypto_ctx_t *ctx;
558 
559 		ASSERT(old_pd == ictx->kc_prov_desc);
560 		KCF_PROV_REFRELE(ictx->kc_prov_desc);
561 		KCF_PROV_REFHOLD(new_pd);
562 		ictx->kc_prov_desc = new_pd;
563 
564 		ctx = &ictx->kc_glbl_ctx;
565 		ctx->cc_provider = new_pd->pd_prov_handle;
566 		ctx->cc_session = new_pd->pd_sid;
567 		ctx->cc_provider_private = NULL;
568 	}
569 
570 	/* We reuse areq by resetting the provider and context fields. */
571 	KCF_PROV_REFRELE(old_pd);
572 	KCF_PROV_REFHOLD(new_pd);
573 	areq->an_provider = new_pd;
574 	mutex_enter(&areq->an_lock);
575 	areq->an_state = REQ_WAITING;
576 	mutex_exit(&areq->an_lock);
577 
578 	switch (new_pd->pd_prov_type) {
579 	case CRYPTO_SW_PROVIDER:
580 		error = kcf_disp_sw_request(areq);
581 		break;
582 
583 	case CRYPTO_HW_PROVIDER: {
584 		taskq_t *taskq = new_pd->pd_sched_info.ks_taskq;
585 
586 		if (taskq_dispatch(taskq, process_req_hwp, areq, TQ_NOSLEEP) ==
587 		    (taskqid_t)0) {
588 			error = CRYPTO_HOST_MEMORY;
589 		} else {
590 			error = CRYPTO_QUEUED;
591 		}
592 
593 		break;
594 	}
595 	}
596 
597 	return (error);
598 }
599 
600 #define	EMPTY_TASKQ(tq)	((tq)->tq_task.tqent_next == &(tq)->tq_task)
601 
602 /*
603  * Routine called by both ioctl and k-api. The consumer should
604  * bundle the parameters into a kcf_req_params_t structure. A bunch
605  * of macros are available in ops_impl.h for this bundling. They are:
606  *
607  * 	KCF_WRAP_DIGEST_OPS_PARAMS()
608  *	KCF_WRAP_MAC_OPS_PARAMS()
609  *	KCF_WRAP_ENCRYPT_OPS_PARAMS()
610  *	KCF_WRAP_DECRYPT_OPS_PARAMS() ... etc.
611  *
612  * It is the caller's responsibility to free the ctx argument when
613  * appropriate. See the KCF_CONTEXT_COND_RELEASE macro for details.
614  */
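/*
 * A minimal usage sketch for an asynchronous submission (illustrative
 * only): the exact KCF_WRAP_*_PARAMS() argument lists are defined in
 * ops_impl.h, the crypto_call_req_t field names are assumed from
 * crypto_call_req(9S), and my_done_cb/my_arg/pd are placeholders.
 *
 *	kcf_req_params_t params;
 *	crypto_call_req_t call_req;
 *	int error;
 *
 *	call_req.cr_flag = CRYPTO_ALWAYS_QUEUE;
 *	call_req.cr_callback_func = my_done_cb;
 *	call_req.cr_callback_arg = my_arg;
 *
 *	KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC, ...);
 *	error = kcf_submit_request(pd, NULL, &call_req, &params, B_FALSE);
 *
 * A return of CRYPTO_QUEUED means the callback will deliver the final
 * status later; any other return value is the final status itself.
 */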
615 int
616 kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
617     crypto_call_req_t *crq, kcf_req_params_t *params, boolean_t cont)
618 {
619 	int error = CRYPTO_SUCCESS;
620 	kcf_areq_node_t *areq;
621 	kcf_sreq_node_t *sreq;
622 	kcf_context_t *kcf_ctx;
623 	taskq_t *taskq = pd->pd_sched_info.ks_taskq;
624 
625 	kcf_ctx = ctx ? (kcf_context_t *)ctx->cc_framework_private : NULL;
626 
627 	/* Synchronous cases */
628 	if (crq == NULL) {
629 		switch (pd->pd_prov_type) {
630 		case CRYPTO_SW_PROVIDER:
631 			error = common_submit_request(pd, ctx, params,
632 			    KCF_RHNDL(KM_SLEEP));
633 			break;
634 
635 		case CRYPTO_HW_PROVIDER:
636 			sreq = kmem_cache_alloc(kcf_sreq_cache, KM_SLEEP);
637 			sreq->sn_state = REQ_ALLOCATED;
638 			sreq->sn_rv = CRYPTO_FAILED;
639 
640 			sreq->sn_params = params;
641 			KCF_PROV_REFHOLD(pd);
642 			sreq->sn_provider = pd;
643 
644 			/*
645 			 * Note that we do not need to hold the context
646 			 * for the synchronous case as the context will never
647 			 * become invalid underneath us in this case.
648 			 */
649 			sreq->sn_context = kcf_ctx;
650 
651 			ASSERT(taskq != NULL);
652 			/*
653 			 * Call the SPI directly if the taskq is empty and the
654 			 * provider is not busy, else dispatch to the taskq.
655 			 * Calling directly is fine as this is the synchronous
656 			 * case. This is unlike the asynchronous case where we
657 			 * must always dispatch to the taskq.
658 			 */
659 			if (EMPTY_TASKQ(taskq) &&
660 			    pd->pd_state == KCF_PROV_READY) {
661 				process_req_hwp(sreq);
662 			} else {
663 				/*
664 				 * We cannot tell from taskq_dispatch() return
665 				 * value if we exceeded maxalloc. Hence the
666 				 * check here. Since we are allowed to wait in
667 				 * the synchronous case, we wait for the taskq
668 				 * to become empty.
669 				 */
670 				if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
671 					taskq_wait(taskq);
672 				}
673 
674 				(void) taskq_dispatch(taskq, process_req_hwp,
675 				    sreq, TQ_SLEEP);
676 			}
677 
678 			/*
679 			 * Wait for the notification to arrive,
680 			 * if the operation is not done yet.
681 			 * Bug# 4722589 will make the wait a cv_wait_sig().
682 			 */
683 			mutex_enter(&sreq->sn_lock);
684 			while (sreq->sn_state < REQ_DONE)
685 				cv_wait(&sreq->sn_cv, &sreq->sn_lock);
686 			mutex_exit(&sreq->sn_lock);
687 
688 			error = sreq->sn_rv;
689 			KCF_PROV_REFRELE(sreq->sn_provider);
690 			kmem_cache_free(kcf_sreq_cache, sreq);
691 
692 			break;
693 
694 		default:
695 			error = CRYPTO_FAILED;
696 			break;
697 		}
698 
699 	} else {	/* Asynchronous cases */
700 		switch (pd->pd_prov_type) {
701 		case CRYPTO_SW_PROVIDER:
702 			if (!(crq->cr_flag & CRYPTO_ALWAYS_QUEUE)) {
703 				/*
704 				 * This case has less overhead since there is
705 				 * no switching of context.
706 				 */
707 				error = common_submit_request(pd, ctx, params,
708 				    KCF_RHNDL(KM_NOSLEEP));
709 			} else {
710 				/*
711 				 * CRYPTO_ALWAYS_QUEUE is set. We need to
712 				 * queue the request and return.
713 				 */
714 				areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
715 				    params, cont);
716 				if (areq == NULL)
717 					error = CRYPTO_HOST_MEMORY;
718 				else {
719 					if (!(crq->cr_flag
720 					    & CRYPTO_SKIP_REQID)) {
721 					/*
722 					 * Set the request handle. This handle
723 					 * is used for any crypto_cancel_req(9f)
724 					 * calls from the consumer. We have to
725 					 * do this before dispatching the
726 					 * request.
727 					 */
728 					crq->cr_reqid = kcf_reqid_insert(areq);
729 					}
730 
731 					error = kcf_disp_sw_request(areq);
732 					/*
733 					 * There is an error processing this
734 					 * request. Remove the handle and
735 					 * release the request structure.
736 					 */
737 					if (error != CRYPTO_QUEUED) {
738 						if (!(crq->cr_flag
739 						    & CRYPTO_SKIP_REQID))
740 							kcf_reqid_delete(areq);
741 						KCF_AREQ_REFRELE(areq);
742 					}
743 				}
744 			}
745 			break;
746 
747 		case CRYPTO_HW_PROVIDER:
748 			/*
749 			 * We need to queue the request and return.
750 			 */
751 			areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params,
752 			    cont);
753 			if (areq == NULL) {
754 				error = CRYPTO_HOST_MEMORY;
755 				goto done;
756 			}
757 
758 			ASSERT(taskq != NULL);
759 			/*
760 			 * We cannot tell from taskq_dispatch() return
761 			 * value if we exceeded maxalloc. Hence the check
762 			 * here.
763 			 */
764 			if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
765 				error = CRYPTO_BUSY;
766 				KCF_AREQ_REFRELE(areq);
767 				goto done;
768 			}
769 
770 			if (!(crq->cr_flag & CRYPTO_SKIP_REQID)) {
771 			/*
772 			 * Set the request handle. This handle is used
773 			 * for any crypto_cancel_req(9f) calls from the
774 			 * consumer. We have to do this before dispatching
775 			 * the request.
776 			 */
777 			crq->cr_reqid = kcf_reqid_insert(areq);
778 			}
779 
780 			if (taskq_dispatch(taskq,
781 			    process_req_hwp, areq, TQ_NOSLEEP) ==
782 			    (taskqid_t)0) {
783 				error = CRYPTO_HOST_MEMORY;
784 				if (!(crq->cr_flag & CRYPTO_SKIP_REQID))
785 					kcf_reqid_delete(areq);
786 				KCF_AREQ_REFRELE(areq);
787 			} else {
788 				error = CRYPTO_QUEUED;
789 			}
790 			break;
791 
792 		default:
793 			error = CRYPTO_FAILED;
794 			break;
795 		}
796 	}
797 
798 done:
799 	return (error);
800 }
801 
802 /*
803  * We're done with this framework context, so free it. Note that freeing
804  * the framework context (kcf_context) frees the global context (crypto_ctx).
805  *
806  * The provider is responsible for freeing provider private context after a
807  * final or single operation and resetting the cc_provider_private field
808  * to NULL. It should do this before it notifies the framework of the
809  * completion. We still need to call KCF_PROV_FREE_CONTEXT to handle cases
810  * like crypto_cancel_ctx(9f).
811  */
812 void
813 kcf_free_context(kcf_context_t *kcf_ctx)
814 {
815 	kcf_provider_desc_t *pd = kcf_ctx->kc_prov_desc;
816 	crypto_ctx_t *gctx = &kcf_ctx->kc_glbl_ctx;
817 	kcf_context_t *kcf_secondctx = kcf_ctx->kc_secondctx;
818 
819 	/* Release the second context, if any */
820 
821 	if (kcf_secondctx != NULL)
822 		KCF_CONTEXT_REFRELE(kcf_secondctx);
823 
824 	if (gctx->cc_provider_private != NULL) {
825 		mutex_enter(&pd->pd_lock);
826 		if (!KCF_IS_PROV_REMOVED(pd)) {
827 			/*
828 			 * Increment the provider's internal refcnt so it
829 			 * doesn't unregister from the framework while
830 			 * we're calling the entry point.
831 			 */
832 			KCF_PROV_IREFHOLD(pd);
833 			mutex_exit(&pd->pd_lock);
834 			(void) KCF_PROV_FREE_CONTEXT(pd, gctx);
835 			KCF_PROV_IREFRELE(pd);
836 		} else {
837 			mutex_exit(&pd->pd_lock);
838 		}
839 	}
840 
841 	/* kcf_ctx->kc_prov_desc has a hold on pd */
842 	KCF_PROV_REFRELE(kcf_ctx->kc_prov_desc);
843 
844 	/* check if this context is shared with a software provider */
845 	if ((gctx->cc_flags & CRYPTO_INIT_OPSTATE) &&
846 	    kcf_ctx->kc_sw_prov_desc != NULL) {
847 		KCF_PROV_REFRELE(kcf_ctx->kc_sw_prov_desc);
848 	}
849 
850 	kmem_cache_free(kcf_context_cache, kcf_ctx);
851 }
852 
853 /*
854  * Free the request after releasing all the holds.
855  */
856 void
857 kcf_free_req(kcf_areq_node_t *areq)
858 {
859 	KCF_PROV_REFRELE(areq->an_provider);
860 	if (areq->an_context != NULL)
861 		KCF_CONTEXT_REFRELE(areq->an_context);
862 
863 	if (areq->an_tried_plist != NULL)
864 		kcf_free_triedlist(areq->an_tried_plist);
865 	kmem_cache_free(kcf_areq_cache, areq);
866 }
867 
868 /*
869  * Utility routine to remove a request from the chain of requests
870  * hanging off a context.
871  */
872 void
873 kcf_removereq_in_ctxchain(kcf_context_t *ictx, kcf_areq_node_t *areq)
874 {
875 	kcf_areq_node_t *cur, *prev;
876 
877 	/*
878 	 * Get context lock, search for areq in the chain and remove it.
879 	 */
880 	ASSERT(ictx != NULL);
881 	mutex_enter(&ictx->kc_in_use_lock);
882 	prev = cur = ictx->kc_req_chain_first;
883 
884 	while (cur != NULL) {
885 		if (cur == areq) {
886 			if (prev == cur) {
887 				if ((ictx->kc_req_chain_first =
888 				    cur->an_ctxchain_next) == NULL)
889 					ictx->kc_req_chain_last = NULL;
890 			} else {
891 				if (cur == ictx->kc_req_chain_last)
892 					ictx->kc_req_chain_last = prev;
893 				prev->an_ctxchain_next = cur->an_ctxchain_next;
894 			}
895 
896 			break;
897 		}
898 		prev = cur;
899 		cur = cur->an_ctxchain_next;
900 	}
901 	mutex_exit(&ictx->kc_in_use_lock);
902 }
903 
904 /*
905  * Remove the specified node from the global software queue.
906  *
907  * The caller must hold the queue lock and request lock (an_lock).
908  */
909 void
910 kcf_remove_node(kcf_areq_node_t *node)
911 {
912 	kcf_areq_node_t *nextp = node->an_next;
913 	kcf_areq_node_t *prevp = node->an_prev;
914 
915 	ASSERT(mutex_owned(&gswq->gs_lock));
916 
917 	if (nextp != NULL)
918 		nextp->an_prev = prevp;
919 	else
920 		gswq->gs_last = prevp;
921 
922 	if (prevp != NULL)
923 		prevp->an_next = nextp;
924 	else
925 		gswq->gs_first = nextp;
926 
927 	ASSERT(mutex_owned(&node->an_lock));
928 	node->an_state = REQ_CANCELED;
929 }
930 
931 /*
932  * Remove and return the first node in the global software queue.
933  *
934  * The caller must hold the queue lock.
935  */
936 static kcf_areq_node_t *
937 kcf_dequeue()
938 {
939 	kcf_areq_node_t *tnode = NULL;
940 
941 	ASSERT(mutex_owned(&gswq->gs_lock));
942 	if ((tnode = gswq->gs_first) == NULL) {
943 		return (NULL);
944 	} else {
945 		ASSERT(gswq->gs_first->an_prev == NULL);
946 		gswq->gs_first = tnode->an_next;
947 		if (tnode->an_next == NULL)
948 			gswq->gs_last = NULL;
949 		else
950 			tnode->an_next->an_prev = NULL;
951 	}
952 
953 	gswq->gs_njobs--;
954 	return (tnode);
955 }
956 
957 /*
958  * Add the request node to the end of the global software queue.
959  *
960  * The caller should not hold the queue lock. Returns 0 if the
961  * request is successfully queued. Returns CRYPTO_BUSY if the limit
962  * on the number of jobs is exceeded.
963  */
964 static int
965 kcf_enqueue(kcf_areq_node_t *node)
966 {
967 	kcf_areq_node_t *tnode;
968 
969 	mutex_enter(&gswq->gs_lock);
970 
971 	if (gswq->gs_njobs >= gswq->gs_maxjobs) {
972 		mutex_exit(&gswq->gs_lock);
973 		return (CRYPTO_BUSY);
974 	}
975 
976 	if (gswq->gs_last == NULL) {
977 		gswq->gs_first = gswq->gs_last = node;
978 	} else {
979 		ASSERT(gswq->gs_last->an_next == NULL);
980 		tnode = gswq->gs_last;
981 		tnode->an_next = node;
982 		gswq->gs_last = node;
983 		node->an_prev = tnode;
984 	}
985 
986 	gswq->gs_njobs++;
987 
988 	/* an_lock not needed here as we hold gs_lock */
989 	node->an_state = REQ_WAITING;
990 
991 	mutex_exit(&gswq->gs_lock);
992 
993 	return (0);
994 }
995 
996 /*
997  * Decrement the thread pool count and signal the failover
998  * thread if we are the last one out.
999  */
1000 static void
1001 kcf_decrcnt_andsignal()
1002 {
1003 	KCF_ATOMIC_DECR(kcfpool->kp_threads);
1004 
1005 	mutex_enter(&kcfpool->kp_thread_lock);
1006 	if (kcfpool->kp_threads == 0)
1007 		cv_signal(&kcfpool->kp_nothr_cv);
1008 	mutex_exit(&kcfpool->kp_thread_lock);
1009 }
1010 
1011 /*
1012  * Function run by a thread from kcfpool to work on the global software queue.
1013  * It is called from ioctl(CRYPTO_POOL_RUN, ...).
1014  */
1015 int
1016 kcf_svc_do_run(void)
1017 {
1018 	int error = 0;
1019 	clock_t rv;
1020 	clock_t timeout_val;
1021 	kcf_areq_node_t *req;
1022 	kcf_context_t *ictx;
1023 	kcf_provider_desc_t *pd;
1024 
1025 	KCF_ATOMIC_INCR(kcfpool->kp_threads);
1026 
1027 	for (;;) {
1028 		mutex_enter(&gswq->gs_lock);
1029 
1030 		while ((req = kcf_dequeue()) == NULL) {
1031 			timeout_val = ddi_get_lbolt() +
1032 			    drv_usectohz(kcf_idlethr_timeout);
1033 
1034 			KCF_ATOMIC_INCR(kcfpool->kp_idlethreads);
1035 			rv = cv_timedwait_sig(&gswq->gs_cv, &gswq->gs_lock,
1036 			    timeout_val);
1037 			KCF_ATOMIC_DECR(kcfpool->kp_idlethreads);
1038 
1039 			switch (rv) {
1040 			case 0:
1041 				/*
1042 				 * A signal (as in kill(2)) is pending. We did
1043 				 * not get any cv_signal().
1044 				 */
1045 				kcf_decrcnt_andsignal();
1046 				mutex_exit(&gswq->gs_lock);
1047 				return (EINTR);
1048 
1049 			case -1:
1050 				/*
1051 				 * Timed out and we are not signaled. Let us
1052 				 * see if this thread should exit. We should
1053 				 * keep at least kcf_minthreads.
1054 				 */
1055 				if (kcfpool->kp_threads > kcf_minthreads) {
1056 					kcf_decrcnt_andsignal();
1057 					mutex_exit(&gswq->gs_lock);
1058 					return (0);
1059 				}
1060 
1061 				/* Resume the wait for work */
1062 				break;
1063 
1064 			default:
1065 				/*
1066 				 * We are signaled to work on the queue.
1067 				 */
1068 				break;
1069 			}
1070 		}
1071 
1072 		mutex_exit(&gswq->gs_lock);
1073 
1074 		ictx = req->an_context;
1075 		if (ictx == NULL) {	/* Context-less operation */
1076 			pd = req->an_provider;
1077 			error = common_submit_request(pd, NULL,
1078 			    &req->an_params, req);
1079 			kcf_aop_done(req, error);
1080 			continue;
1081 		}
1082 
1083 		/*
1084 		 * We check if we can work on the request now.
1085 		 * Solaris does not guarantee any order on how the threads
1086 		 * are scheduled or how the waiters on a mutex are chosen.
1087 		 * So, we need to maintain our own order.
1088 		 *
1089 		 * is_my_turn is set to B_TRUE initially for a request when
1090 		 * it is enqueued and there are no other requests
1091 		 * for that context.  Note that a thread sleeping on
1092 		 * an_turn_cv is not counted as an idle thread. This is
1093 		 * because we define an idle thread as one that sleeps on the
1094 		 * global queue waiting for new requests.
1095 		 */
1096 		mutex_enter(&req->an_lock);
1097 		while (req->an_is_my_turn == B_FALSE) {
1098 			KCF_ATOMIC_INCR(kcfpool->kp_blockedthreads);
1099 			cv_wait(&req->an_turn_cv, &req->an_lock);
1100 			KCF_ATOMIC_DECR(kcfpool->kp_blockedthreads);
1101 		}
1102 
1103 		req->an_state = REQ_INPROGRESS;
1104 		mutex_exit(&req->an_lock);
1105 
1106 		pd = ictx->kc_prov_desc;
1107 		ASSERT(pd == req->an_provider);
1108 		error = common_submit_request(pd, &ictx->kc_glbl_ctx,
1109 		    &req->an_params, req);
1110 
1111 		kcf_aop_done(req, error);
1112 	}
1113 }
1114 
1115 /*
1116  * kmem_cache_alloc constructor for sync request structure.
1117  */
1118 /* ARGSUSED */
1119 static int
1120 kcf_sreq_cache_constructor(void *buf, void *cdrarg, int kmflags)
1121 {
1122 	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;
1123 
1124 	sreq->sn_type = CRYPTO_SYNCH;
1125 	cv_init(&sreq->sn_cv, NULL, CV_DEFAULT, NULL);
1126 	mutex_init(&sreq->sn_lock, NULL, MUTEX_DEFAULT, NULL);
1127 
1128 	return (0);
1129 }
1130 
1131 /* ARGSUSED */
1132 static void
1133 kcf_sreq_cache_destructor(void *buf, void *cdrarg)
1134 {
1135 	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;
1136 
1137 	mutex_destroy(&sreq->sn_lock);
1138 	cv_destroy(&sreq->sn_cv);
1139 }
1140 
1141 /*
1142  * kmem_cache_alloc constructor for async request structure.
1143  */
1144 /* ARGSUSED */
1145 static int
1146 kcf_areq_cache_constructor(void *buf, void *cdrarg, int kmflags)
1147 {
1148 	kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;
1149 
1150 	areq->an_type = CRYPTO_ASYNCH;
1151 	mutex_init(&areq->an_lock, NULL, MUTEX_DEFAULT, NULL);
1152 	cv_init(&areq->an_done, NULL, CV_DEFAULT, NULL);
1153 	cv_init(&areq->an_turn_cv, NULL, CV_DEFAULT, NULL);
1154 
1155 	return (0);
1156 }
1157 
1158 /* ARGSUSED */
1159 static void
1160 kcf_areq_cache_destructor(void *buf, void *cdrarg)
1161 {
1162 	kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;
1163 
1164 	ASSERT(areq->an_refcnt == 0);
1165 	mutex_destroy(&areq->an_lock);
1166 	cv_destroy(&areq->an_done);
1167 	cv_destroy(&areq->an_turn_cv);
1168 }
1169 
1170 /*
1171  * kmem_cache_alloc constructor for kcf_context structure.
1172  */
1173 /* ARGSUSED */
1174 static int
1175 kcf_context_cache_constructor(void *buf, void *cdrarg, int kmflags)
1176 {
1177 	kcf_context_t *kctx = (kcf_context_t *)buf;
1178 
1179 	mutex_init(&kctx->kc_in_use_lock, NULL, MUTEX_DEFAULT, NULL);
1180 
1181 	return (0);
1182 }
1183 
1184 /* ARGSUSED */
1185 static void
1186 kcf_context_cache_destructor(void *buf, void *cdrarg)
1187 {
1188 	kcf_context_t *kctx = (kcf_context_t *)buf;
1189 
1190 	ASSERT(kctx->kc_refcnt == 0);
1191 	mutex_destroy(&kctx->kc_in_use_lock);
1192 }
1193 
1194 /*
1195  * Creates and initializes all the structures needed by the framework.
1196  */
1197 void
1198 kcf_sched_init(void)
1199 {
1200 	int i;
1201 	kcf_reqid_table_t *rt;
1202 
1203 	/*
1204 	 * Create all the kmem caches needed by the framework. We set the
1205 	 * align argument to 64 so that the slab is 64-byte aligned and the
1206 	 * object size (cache_chunksize) is a 64-byte multiple. This helps
1207 	 * to avoid false sharing, as 64 bytes is the size of the CPU
1208 	 * cache line.
1209 	 */
1210 	kcf_sreq_cache = kmem_cache_create("kcf_sreq_cache",
1211 	    sizeof (struct kcf_sreq_node), 64, kcf_sreq_cache_constructor,
1212 	    kcf_sreq_cache_destructor, NULL, NULL, NULL, 0);
1213 
1214 	kcf_areq_cache = kmem_cache_create("kcf_areq_cache",
1215 	    sizeof (struct kcf_areq_node), 64, kcf_areq_cache_constructor,
1216 	    kcf_areq_cache_destructor, NULL, NULL, NULL, 0);
1217 
1218 	kcf_context_cache = kmem_cache_create("kcf_context_cache",
1219 	    sizeof (struct kcf_context), 64, kcf_context_cache_constructor,
1220 	    kcf_context_cache_destructor, NULL, NULL, NULL, 0);
1221 
1222 	mutex_init(&kcf_dh_lock, NULL, MUTEX_DEFAULT, NULL);
1223 
1224 	gswq = kmem_alloc(sizeof (kcf_global_swq_t), KM_SLEEP);
1225 
1226 	mutex_init(&gswq->gs_lock, NULL, MUTEX_DEFAULT, NULL);
1227 	cv_init(&gswq->gs_cv, NULL, CV_DEFAULT, NULL);
1228 	gswq->gs_njobs = 0;
1229 	compute_min_max_threads();	/* Computes gs_maxjobs also. */
1230 	gswq->gs_first = gswq->gs_last = NULL;
1231 
1232 	/* Initialize the global reqid table */
1233 	for (i = 0; i < REQID_TABLES; i++) {
1234 		rt = kmem_zalloc(sizeof (kcf_reqid_table_t), KM_SLEEP);
1235 		kcf_reqid_table[i] = rt;
1236 		mutex_init(&rt->rt_lock, NULL, MUTEX_DEFAULT, NULL);
1237 		rt->rt_curid = i;
1238 	}
1239 
1240 	/* Allocate and initialize the thread pool */
1241 	kcfpool_alloc();
1242 
1243 	/* Initialize the event notification list variables */
1244 	mutex_init(&ntfy_list_lock, NULL, MUTEX_DEFAULT, NULL);
1245 	cv_init(&ntfy_list_cv, NULL, CV_DEFAULT, NULL);
1246 
1247 	/* Initialize the crypto_bufcall list variables */
1248 	mutex_init(&cbuf_list_lock, NULL, MUTEX_DEFAULT, NULL);
1249 	cv_init(&cbuf_list_cv, NULL, CV_DEFAULT, NULL);
1250 
1251 	/* Create the kcf kstat */
1252 	kcf_misc_kstat = kstat_create("kcf", 0, "framework_stats", "crypto",
1253 	    KSTAT_TYPE_NAMED, sizeof (kcf_stats_t) / sizeof (kstat_named_t),
1254 	    KSTAT_FLAG_VIRTUAL);
1255 
1256 	if (kcf_misc_kstat != NULL) {
1257 		kcf_misc_kstat->ks_data = &kcf_ksdata;
1258 		kcf_misc_kstat->ks_update = kcf_misc_kstat_update;
1259 		kstat_install(kcf_misc_kstat);
1260 	}
1261 }
1262 
1263 /*
1264  * This routine should only be called by drv/cryptoadm.
1265  *
1266  * kcf_sched_running flag isn't protected by a lock. But we are safe because
1267  * the first thread ("cryptoadm refresh") calling this routine during
1268  * boot time completes before any other thread that can call this routine.
1269  */
1270 void
1271 kcf_sched_start(void)
1272 {
1273 	if (kcf_sched_running)
1274 		return;
1275 
1276 	/* Start the failover kernel thread for now */
1277 	(void) thread_create(NULL, 0, &kcf_failover_thread, 0, 0, &p0,
1278 	    TS_RUN, minclsyspri);
1279 
1280 	/* Start the background processing thread. */
1281 	(void) thread_create(NULL, 0, &crypto_bufcall_service, 0, 0, &p0,
1282 	    TS_RUN, minclsyspri);
1283 
1284 	kcf_sched_running = B_TRUE;
1285 }
1286 
1287 /*
1288  * Signal the waiting sync client.
1289  */
1290 void
1291 kcf_sop_done(kcf_sreq_node_t *sreq, int error)
1292 {
1293 	mutex_enter(&sreq->sn_lock);
1294 	sreq->sn_state = REQ_DONE;
1295 	sreq->sn_rv = error;
1296 	cv_signal(&sreq->sn_cv);
1297 	mutex_exit(&sreq->sn_lock);
1298 }
1299 
1300 /*
1301  * Callback the async client with the operation status.
1302  * We free the async request node and possibly the context.
1303  * We also handle any chain of requests hanging off of
1304  * the context.
1305  */
1306 void
1307 kcf_aop_done(kcf_areq_node_t *areq, int error)
1308 {
1309 	kcf_op_type_t optype;
1310 	boolean_t skip_notify = B_FALSE;
1311 	kcf_context_t *ictx;
1312 	kcf_areq_node_t *nextreq;
1313 
1314 	/*
1315 	 * Handle recoverable errors. This has to be done first
1316 	 * before doing anything else in this routine so that
1317 	 * we do not change the state of the request.
1318 	 */
1319 	if (error != CRYPTO_SUCCESS && IS_RECOVERABLE(error)) {
1320 		/*
1321 		 * We try another provider, if one is available. Else
1322 		 * we continue with the failure notification to the
1323 		 * client.
1324 		 */
1325 		if (kcf_resubmit_request(areq) == CRYPTO_QUEUED)
1326 			return;
1327 	}
1328 
1329 	mutex_enter(&areq->an_lock);
1330 	areq->an_state = REQ_DONE;
1331 	mutex_exit(&areq->an_lock);
1332 
1333 	optype = (&areq->an_params)->rp_optype;
1334 	if ((ictx = areq->an_context) != NULL) {
1335 		/*
1336 		 * A request, after it is removed from the request
1337 		 * queue, still stays on the chain of requests hanging
1338 		 * off its context structure. It needs to be removed
1339 		 * from this chain at this point.
1340 		 */
1341 		mutex_enter(&ictx->kc_in_use_lock);
1342 		nextreq = areq->an_ctxchain_next;
1343 		if (nextreq != NULL) {
1344 			mutex_enter(&nextreq->an_lock);
1345 			nextreq->an_is_my_turn = B_TRUE;
1346 			cv_signal(&nextreq->an_turn_cv);
1347 			mutex_exit(&nextreq->an_lock);
1348 		}
1349 
1350 		ictx->kc_req_chain_first = nextreq;
1351 		if (nextreq == NULL)
1352 			ictx->kc_req_chain_last = NULL;
1353 		mutex_exit(&ictx->kc_in_use_lock);
1354 
1355 		if (IS_SINGLE_OP(optype) || IS_FINAL_OP(optype)) {
1356 			ASSERT(nextreq == NULL);
1357 			KCF_CONTEXT_REFRELE(ictx);
1358 		} else if (error != CRYPTO_SUCCESS && IS_INIT_OP(optype)) {
1359 		/*
1360 		 * NOTE - We do not release the context in case of update
1361 		 * operations. We require the consumer to free it explicitly,
1362 		 * in case it wants to abandon an update operation. This is done
1363 		 * as there may be mechanisms in ECB mode that can continue
1364 		 * even if an operation on a block fails.
1365 		 */
1366 			KCF_CONTEXT_REFRELE(ictx);
1367 		}
1368 	}
1369 
1370 	/* Deal with the internal continuation to this request first */
1371 
1372 	if (areq->an_isdual) {
1373 		kcf_dual_req_t *next_arg;
1374 		next_arg = (kcf_dual_req_t *)areq->an_reqarg.cr_callback_arg;
1375 		next_arg->kr_areq = areq;
1376 		KCF_AREQ_REFHOLD(areq);
1377 		areq->an_isdual = B_FALSE;
1378 
1379 		NOTIFY_CLIENT(areq, error);
1380 		return;
1381 	}
1382 
1383 	/*
1384 	 * If CRYPTO_NOTIFY_OPDONE flag is set, we should notify
1385 	 * always. If this flag is clear, we skip the notification
1386 	 * provided there are no errors.  We check this flag for only
1387 	 * init or update operations. It is ignored for single, final or
1388 	 * atomic operations.
1389 	 */
1390 	skip_notify = (IS_UPDATE_OP(optype) || IS_INIT_OP(optype)) &&
1391 	    (!(areq->an_reqarg.cr_flag & CRYPTO_NOTIFY_OPDONE)) &&
1392 	    (error == CRYPTO_SUCCESS);
1393 
1394 	if (!skip_notify) {
1395 		NOTIFY_CLIENT(areq, error);
1396 	}
1397 
1398 	if (!(areq->an_reqarg.cr_flag & CRYPTO_SKIP_REQID))
1399 		kcf_reqid_delete(areq);
1400 
1401 	KCF_AREQ_REFRELE(areq);
1402 }
1403 
1404 /*
1405  * Allocate the thread pool and initialize all the fields.
1406  */
1407 static void
1408 kcfpool_alloc()
1409 {
1410 	kcfpool = kmem_alloc(sizeof (kcf_pool_t), KM_SLEEP);
1411 
1412 	kcfpool->kp_threads = kcfpool->kp_idlethreads = 0;
1413 	kcfpool->kp_blockedthreads = 0;
1414 	kcfpool->kp_signal_create_thread = B_FALSE;
1415 	kcfpool->kp_nthrs = 0;
1416 	kcfpool->kp_user_waiting = B_FALSE;
1417 
1418 	mutex_init(&kcfpool->kp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
1419 	cv_init(&kcfpool->kp_nothr_cv, NULL, CV_DEFAULT, NULL);
1420 
1421 	mutex_init(&kcfpool->kp_user_lock, NULL, MUTEX_DEFAULT, NULL);
1422 	cv_init(&kcfpool->kp_user_cv, NULL, CV_DEFAULT, NULL);
1423 
1424 	kcf_idlethr_timeout = KCF_DEFAULT_THRTIMEOUT;
1425 }
1426 
1427 /*
1428  * This function is run by the 'creator' thread in the pool.
1429  * It is called from ioctl(CRYPTO_POOL_WAIT, ...).
1430  */
1431 int
1432 kcf_svc_wait(int *nthrs)
1433 {
1434 	clock_t rv;
1435 	clock_t timeout_val;
1436 
1437 	if (kcfpool == NULL)
1438 		return (ENOENT);
1439 
1440 	mutex_enter(&kcfpool->kp_user_lock);
1441 	/* Check if there's already a user thread waiting on this kcfpool */
1442 	if (kcfpool->kp_user_waiting) {
1443 		mutex_exit(&kcfpool->kp_user_lock);
1444 		*nthrs = 0;
1445 		return (EBUSY);
1446 	}
1447 
1448 	kcfpool->kp_user_waiting = B_TRUE;
1449 
1450 	/* Go to sleep, waiting for the signaled flag. */
1451 	while (!kcfpool->kp_signal_create_thread) {
1452 		timeout_val = ddi_get_lbolt() +
1453 		    drv_usectohz(kcf_idlethr_timeout);
1454 
1455 		rv = cv_timedwait_sig(&kcfpool->kp_user_cv,
1456 		    &kcfpool->kp_user_lock, timeout_val);
1457 		switch (rv) {
1458 		case 0:
1459 			/* Interrupted, return to handle exit or signal */
1460 			kcfpool->kp_user_waiting = B_FALSE;
1461 			kcfpool->kp_signal_create_thread = B_FALSE;
1462 			mutex_exit(&kcfpool->kp_user_lock);
1463 			/*
1464 			 * kcfd is exiting. Release the door and
1465 			 * invalidate it.
1466 			 */
1467 			mutex_enter(&kcf_dh_lock);
1468 			if (kcf_dh != NULL) {
1469 				door_ki_rele(kcf_dh);
1470 				kcf_dh = NULL;
1471 			}
1472 			mutex_exit(&kcf_dh_lock);
1473 			return (EINTR);
1474 
1475 		case -1:
1476 			/* Timed out. Recalculate the min/max threads */
1477 			compute_min_max_threads();
1478 			break;
1479 
1480 		default:
1481 			/* Worker thread did a cv_signal() */
1482 			break;
1483 		}
1484 	}
1485 
1486 	kcfpool->kp_signal_create_thread = B_FALSE;
1487 	kcfpool->kp_user_waiting = B_FALSE;
1488 
1489 	*nthrs = kcfpool->kp_nthrs;
1490 	mutex_exit(&kcfpool->kp_user_lock);
1491 
1492 	/* Return to userland for possible thread creation. */
1493 	return (0);
1494 }
1495 
1496 
1497 /*
1498  * This routine introduces a locking order for gswq->gs_lock followed
1499  * by cpu_lock.
1500  * This means that no consumer of the k-api should hold cpu_lock when calling
1501  * k-api routines.
1502  */
1503 static void
1504 compute_min_max_threads()
1505 {
1506 	psetid_t psid = PS_MYID;
1507 
1508 	mutex_enter(&gswq->gs_lock);
1509 	if (cpupart_get_cpus(&psid, NULL, (uint_t *)&kcf_minthreads) != 0) {
1510 		cmn_err(CE_WARN, "kcf:compute_min_max_threads cpupart_get_cpus:"
1511 		    " failed, setting kcf_minthreads to 1");
1512 		kcf_minthreads = 1;
1513 	}
1514 	kcf_maxthreads = kcf_thr_multiple * kcf_minthreads;
1515 	gswq->gs_maxjobs = kcf_maxthreads * crypto_taskq_maxalloc;
1516 	mutex_exit(&gswq->gs_lock);
1517 }
1518 
1519 /*
1520  * This is the main routine of the failover kernel thread.
1521  * If there are any threads in the pool we sleep. The last thread in the
1522  * pool to exit will signal us to get to work. We get back to sleep
1523  * once we detect that the pool has threads.
1524  *
1525  * Note that in the hand-off from us to a pool thread we get to run once.
1526  * Since this hand-off is a rare event this should be fine.
1527  */
1528 static void
1529 kcf_failover_thread()
1530 {
1531 	int error = 0;
1532 	kcf_context_t *ictx;
1533 	kcf_areq_node_t *req;
1534 	callb_cpr_t cpr_info;
1535 	kmutex_t cpr_lock;
1536 	static boolean_t is_logged = B_FALSE;
1537 
1538 	mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
1539 	CALLB_CPR_INIT(&cpr_info, &cpr_lock, callb_generic_cpr,
1540 	    "kcf_failover_thread");
1541 
1542 	for (;;) {
1543 		/*
1544 		 * Wait if there are any threads in the pool.
1545 		 */
1546 		if (kcfpool->kp_threads > 0) {
1547 			mutex_enter(&cpr_lock);
1548 			CALLB_CPR_SAFE_BEGIN(&cpr_info);
1549 			mutex_exit(&cpr_lock);
1550 
1551 			mutex_enter(&kcfpool->kp_thread_lock);
1552 			cv_wait(&kcfpool->kp_nothr_cv,
1553 			    &kcfpool->kp_thread_lock);
1554 			mutex_exit(&kcfpool->kp_thread_lock);
1555 
1556 			mutex_enter(&cpr_lock);
1557 			CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
1558 			mutex_exit(&cpr_lock);
1559 			is_logged = B_FALSE;
1560 		}
1561 
1562 		/*
1563 		 * Get the requests from the queue and wait if needed.
1564 		 */
1565 		mutex_enter(&gswq->gs_lock);
1566 
1567 		while ((req = kcf_dequeue()) == NULL) {
1568 			mutex_enter(&cpr_lock);
1569 			CALLB_CPR_SAFE_BEGIN(&cpr_info);
1570 			mutex_exit(&cpr_lock);
1571 
1572 			KCF_ATOMIC_INCR(kcfpool->kp_idlethreads);
1573 			cv_wait(&gswq->gs_cv, &gswq->gs_lock);
1574 			KCF_ATOMIC_DECR(kcfpool->kp_idlethreads);
1575 
1576 			mutex_enter(&cpr_lock);
1577 			CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
1578 			mutex_exit(&cpr_lock);
1579 		}
1580 
1581 		mutex_exit(&gswq->gs_lock);
1582 
1583 		/*
1584 		 * We check the kp_threads since kcfd could have started
1585 		 * while we are waiting on the global software queue.
1586 		 */
1587 		if (kcfpool->kp_threads <= 0 && !is_logged) {
1588 			cmn_err(CE_WARN, "kcfd is not running. Please check "
1589 			    "and restart kcfd. Using the failover kernel "
1590 			    "thread for now.\n");
1591 			is_logged = B_TRUE;
1592 		}
1593 
1594 		/*
1595 		 * Get to work on the request.
1596 		 */
1597 		ictx = req->an_context;
1598 		mutex_enter(&req->an_lock);
1599 		req->an_state = REQ_INPROGRESS;
1600 		mutex_exit(&req->an_lock);
1601 
1602 		error = common_submit_request(req->an_provider, ictx ?
1603 		    &ictx->kc_glbl_ctx : NULL, &req->an_params, req);
1604 
1605 		kcf_aop_done(req, error);
1606 	}
1607 }
1608 
1609 /*
1610  * Insert the async request in the hash table after assigning it
1611  * an ID. Returns the ID.
1612  *
1613  * The ID is used by the caller to pass as an argument to a
1614  * cancel_req() routine later.
1615  */
1616 static crypto_req_id_t
1617 kcf_reqid_insert(kcf_areq_node_t *areq)
1618 {
1619 	int indx;
1620 	crypto_req_id_t id;
1621 	kcf_areq_node_t *headp;
1622 	kcf_reqid_table_t *rt =
1623 	    kcf_reqid_table[CPU->cpu_seqid & REQID_TABLE_MASK];
1624 
1625 	mutex_enter(&rt->rt_lock);
1626 
1627 	rt->rt_curid = id =
1628 	    (rt->rt_curid - REQID_COUNTER_LOW) | REQID_COUNTER_HIGH;
1629 	SET_REQID(areq, id);
1630 	indx = REQID_HASH(id);
1631 	headp = areq->an_idnext = rt->rt_idhash[indx];
1632 	areq->an_idprev = NULL;
1633 	if (headp != NULL)
1634 		headp->an_idprev = areq;
1635 
1636 	rt->rt_idhash[indx] = areq;
1637 	mutex_exit(&rt->rt_lock);
1638 
1639 	return (id);
1640 }
1641 
1642 /*
1643  * Delete the async request from the hash table.
1644  */
1645 static void
1646 kcf_reqid_delete(kcf_areq_node_t *areq)
1647 {
1648 	int indx;
1649 	kcf_areq_node_t *nextp, *prevp;
1650 	crypto_req_id_t id = GET_REQID(areq);
1651 	kcf_reqid_table_t *rt;
1652 
1653 	rt = kcf_reqid_table[id & REQID_TABLE_MASK];
1654 	indx = REQID_HASH(id);
1655 
1656 	mutex_enter(&rt->rt_lock);
1657 
1658 	nextp = areq->an_idnext;
1659 	prevp = areq->an_idprev;
1660 	if (nextp != NULL)
1661 		nextp->an_idprev = prevp;
1662 	if (prevp != NULL)
1663 		prevp->an_idnext = nextp;
1664 	else
1665 		rt->rt_idhash[indx] = nextp;
1666 
1667 	SET_REQID(areq, 0);
1668 	cv_broadcast(&areq->an_done);
1669 
1670 	mutex_exit(&rt->rt_lock);
1671 }
1672 
1673 /*
1674  * Cancel a single asynchronous request.
1675  *
1676  * We guarantee that no problems will result from calling
1677  * crypto_cancel_req() for a request which is either running, or
1678  * has already completed. We remove the request from any queues
1679  * if it is possible. We wait for request completion if the
1680  * request is dispatched to a provider.
1681  *
1682  * Calling context:
1683  * 	Can be called from user context only.
1684  *
1685  * NOTE: We acquire the following locks in this routine (in order):
1686  *	- rt_lock (kcf_reqid_table_t)
1687  *	- gswq->gs_lock
1688  *	- areq->an_lock
1689  *	- ictx->kc_in_use_lock (from kcf_removereq_in_ctxchain())
1690  *
1691  * This locking order MUST be maintained in code every where else.
1692  */
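/*
 * A minimal usage sketch (illustrative only; call_req, params and pd
 * are placeholders): an asynchronous consumer that did not set
 * CRYPTO_SKIP_REQID saves the ID written to cr_reqid and can cancel
 * the request later.
 *
 *	crypto_req_id_t reqid;
 *
 *	if (kcf_submit_request(pd, NULL, &call_req, &params,
 *	    B_FALSE) == CRYPTO_QUEUED)
 *		reqid = call_req.cr_reqid;
 *	...
 *	crypto_cancel_req(reqid);
 *
 * If the request has already been dispatched to a provider, the call
 * blocks until the request completes.
 */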
1693 void
1694 crypto_cancel_req(crypto_req_id_t id)
1695 {
1696 	int indx;
1697 	kcf_areq_node_t *areq;
1698 	kcf_provider_desc_t *pd;
1699 	kcf_context_t *ictx;
1700 	kcf_reqid_table_t *rt;
1701 
1702 	rt = kcf_reqid_table[id & REQID_TABLE_MASK];
1703 	indx = REQID_HASH(id);
1704 
1705 	mutex_enter(&rt->rt_lock);
1706 	for (areq = rt->rt_idhash[indx]; areq; areq = areq->an_idnext) {
1707 	if (GET_REQID(areq) == id) {
1708 		/*
1709 		 * We found the request. It is either still waiting
1710 		 * in the framework queues or running at the provider.
1711 		 */
1712 		pd = areq->an_provider;
1713 		ASSERT(pd != NULL);
1714 
1715 		switch (pd->pd_prov_type) {
1716 		case CRYPTO_SW_PROVIDER:
1717 			mutex_enter(&gswq->gs_lock);
1718 			mutex_enter(&areq->an_lock);
1719 
1720 			/* This request can be safely canceled. */
1721 			if (areq->an_state <= REQ_WAITING) {
1722 				/* Remove from gswq, global software queue. */
1723 				kcf_remove_node(areq);
1724 				if ((ictx = areq->an_context) != NULL)
1725 					kcf_removereq_in_ctxchain(ictx, areq);
1726 
1727 				mutex_exit(&areq->an_lock);
1728 				mutex_exit(&gswq->gs_lock);
1729 				mutex_exit(&rt->rt_lock);
1730 
1731 				/* Remove areq from hash table and free it. */
1732 				kcf_reqid_delete(areq);
1733 				KCF_AREQ_REFRELE(areq);
1734 				return;
1735 			}
1736 
1737 			mutex_exit(&areq->an_lock);
1738 			mutex_exit(&gswq->gs_lock);
1739 			break;
1740 
1741 		case CRYPTO_HW_PROVIDER:
1742 			/*
1743 			 * There is no interface to remove an entry
1744 			 * once it is on the taskq. So, we do not do
1745 			 * anything for a hardware provider.
1746 			 */
1747 			break;
1748 		}
1749 
1750 		/*
1751 		 * The request is running. Wait for the request completion
1752 		 * to notify us.
1753 		 */
1754 		KCF_AREQ_REFHOLD(areq);
1755 		while (GET_REQID(areq) == id)
1756 			cv_wait(&areq->an_done, &rt->rt_lock);
1757 		KCF_AREQ_REFRELE(areq);
1758 		break;
1759 	}
1760 	}
1761 
1762 	mutex_exit(&rt->rt_lock);
1763 }
1764 
1765 /*
1766  * Cancel all asynchronous requests associated with the
1767  * passed in crypto context and free it.
1768  *
1769  * A client SHOULD NOT call this routine after calling a crypto_*_final
1770  * routine. This routine is called only during intermediate operations.
1771  * The client should not use the crypto context after this function returns
1772  * since we destroy it.
1773  *
1774  * Calling context:
1775  * 	Can be called from user context only.
1776  */
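/*
 * A minimal usage sketch (illustrative only): a consumer abandoning a
 * multi-part operation passes in the context handle it obtained from
 * one of the *_init(9F) routines, e.g. crypto_digest_init(9F).
 *
 *	crypto_context_t ctx;
 *
 *	... an earlier *_init(9F) call returned ctx ...
 *	crypto_cancel_ctx(ctx);
 *
 * All requests chained to the context are canceled and the context
 * itself is released.
 */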
1777 void
1778 crypto_cancel_ctx(crypto_context_t ctx)
1779 {
1780 	kcf_context_t *ictx;
1781 	kcf_areq_node_t *areq;
1782 
1783 	if (ctx == NULL)
1784 		return;
1785 
1786 	ictx = (kcf_context_t *)((crypto_ctx_t *)ctx)->cc_framework_private;
1787 
1788 	mutex_enter(&ictx->kc_in_use_lock);
1789 
1790 	/* Walk the chain and cancel each request */
1791 	while ((areq = ictx->kc_req_chain_first) != NULL) {
1792 		/*
1793 		 * We have to drop the lock here as we may have
1794 		 * to wait for request completion. We hold the
1795 		 * request before dropping the lock though, so that it
1796 		 * won't be freed underneath us.
1797 		 */
1798 		KCF_AREQ_REFHOLD(areq);
1799 		mutex_exit(&ictx->kc_in_use_lock);
1800 
1801 		crypto_cancel_req(GET_REQID(areq));
1802 		KCF_AREQ_REFRELE(areq);
1803 
1804 		mutex_enter(&ictx->kc_in_use_lock);
1805 	}
1806 
1807 	mutex_exit(&ictx->kc_in_use_lock);
1808 	KCF_CONTEXT_REFRELE(ictx);
1809 }
1810 
1811 /*
1812  * Update kstats.
1813  */
1814 static int
1815 kcf_misc_kstat_update(kstat_t *ksp, int rw)
1816 {
1817 	uint_t tcnt;
1818 	kcf_stats_t *ks_data;
1819 
1820 	if (rw == KSTAT_WRITE)
1821 		return (EACCES);
1822 
1823 	ks_data = ksp->ks_data;
1824 
1825 	ks_data->ks_thrs_in_pool.value.ui32 = kcfpool->kp_threads;
1826 	/*
1827 	 * The failover thread is counted in kp_idlethreads in
1828 	 * some corner cases. This is done to avoid doing more checks
1829 	 * when submitting a request. We account for those cases below.
1830 	 */
1831 	if ((tcnt = kcfpool->kp_idlethreads) == (kcfpool->kp_threads + 1))
1832 		tcnt--;
1833 	ks_data->ks_idle_thrs.value.ui32 = tcnt;
1834 	ks_data->ks_minthrs.value.ui32 = kcf_minthreads;
1835 	ks_data->ks_maxthrs.value.ui32 = kcf_maxthreads;
1836 	ks_data->ks_swq_njobs.value.ui32 = gswq->gs_njobs;
1837 	ks_data->ks_swq_maxjobs.value.ui32 = gswq->gs_maxjobs;
1838 	ks_data->ks_taskq_threads.value.ui32 = crypto_taskq_threads;
1839 	ks_data->ks_taskq_minalloc.value.ui32 = crypto_taskq_minalloc;
1840 	ks_data->ks_taskq_maxalloc.value.ui32 = crypto_taskq_maxalloc;
1841 
1842 	return (0);
1843 }
1844 
1845 /*
1846  * Allocate and initialize a kcf_dual_req, used for saving the arguments of
1847  * a dual operation or an atomic operation that has to be internally
1848  * simulated with multiple single steps.
1849  * crq determines the memory allocation flags.
1850  */
1851 
1852 kcf_dual_req_t *
1853 kcf_alloc_req(crypto_call_req_t *crq)
1854 {
1855 	kcf_dual_req_t *kcr;
1856 
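	/*
	 * crq determines the kmem flag (see the comment above): a
	 * synchronous caller (NULL crq) is expected to get a sleeping
	 * allocation, so only the asynchronous case should see a NULL
	 * return here.
	 */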
1857 	kcr = kmem_alloc(sizeof (kcf_dual_req_t), KCF_KMFLAG(crq));
1858 
1859 	if (kcr == NULL)
1860 		return (NULL);
1861 
1862 	/* Copy the whole crypto_call_req struct, as it isn't persistent */
1863 	if (crq != NULL)
1864 		kcr->kr_callreq = *crq;
1865 	else
1866 		bzero(&(kcr->kr_callreq), sizeof (crypto_call_req_t));
1867 	kcr->kr_areq = NULL;
1868 	kcr->kr_saveoffset = 0;
1869 	kcr->kr_savelen = 0;
1870 
1871 	return (kcr);
1872 }
1873 
1874 /*
1875  * Callback routine for the next part of a simulated dual operation.
1876  * Schedules the next step.
1877  *
1878  * This routine can be called from interrupt context.
1879  */
1880 void
1881 kcf_next_req(void *next_req_arg, int status)
1882 {
1883 	kcf_dual_req_t *next_req = (kcf_dual_req_t *)next_req_arg;
1884 	kcf_req_params_t *params = &(next_req->kr_params);
1885 	kcf_areq_node_t *areq = next_req->kr_areq;
1886 	int error = status;
1887 	kcf_provider_desc_t *pd;
1888 	crypto_dual_data_t *ct;
1889 
1890 	/* Stop processing if an error occurred at this step */
1891 	if (error != CRYPTO_SUCCESS) {
1892 out:
1893 		areq->an_reqarg = next_req->kr_callreq;
1894 		KCF_AREQ_REFRELE(areq);
1895 		kmem_free(next_req, sizeof (kcf_dual_req_t));
1896 		areq->an_isdual = B_FALSE;
1897 		kcf_aop_done(areq, error);
1898 		return;
1899 	}
1900 
1901 	switch (params->rp_opgrp) {
1902 	case KCF_OG_MAC: {
1903 
1904 		/*
1905 		 * The next req is submitted with the same reqid as the
1906 		 * first part. The consumer only got back that reqid, and
1907 		 * should still be able to cancel the operation during its
1908 		 * second step.
1909 		 */
1910 		kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
1911 		crypto_ctx_template_t mac_tmpl;
1912 		kcf_mech_entry_t *me;
1913 
1914 		ct = (crypto_dual_data_t *)mops->mo_data;
1915 		mac_tmpl = (crypto_ctx_template_t)mops->mo_templ;
1916 
1917 		/* No expected recoverable failures, so no retry list */
1918 		pd = kcf_get_mech_provider(mops->mo_framework_mechtype,
1919 		    &me, &error, NULL, CRYPTO_FG_MAC_ATOMIC,
1920 		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len2);
1921 
1922 		if (pd == NULL) {
1923 			error = CRYPTO_MECH_NOT_SUPPORTED;
1924 			goto out;
1925 		}
1926 		/* Validate the MAC context template here */
1927 		if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
1928 		    (mac_tmpl != NULL)) {
1929 			kcf_ctx_template_t *ctx_mac_tmpl;
1930 
1931 			ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;
1932 
1933 			if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
1934 				KCF_PROV_REFRELE(pd);
1935 				error = CRYPTO_OLD_CTX_TEMPLATE;
1936 				goto out;
1937 			}
1938 			mops->mo_templ = ctx_mac_tmpl->ct_prov_tmpl;
1939 		}
1940 
1941 		break;
1942 	}
1943 	case KCF_OG_DECRYPT: {
1944 		kcf_decrypt_ops_params_t *dcrops =
1945 		    &(params->rp_u.decrypt_params);
1946 
1947 		ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
1948 		/* No expected recoverable failures, so no retry list */
1949 		pd = kcf_get_mech_provider(dcrops->dop_framework_mechtype,
1950 		    NULL, &error, NULL, CRYPTO_FG_DECRYPT_ATOMIC,
1951 		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len1);
1952 
1953 		if (pd == NULL) {
1954 			error = CRYPTO_MECH_NOT_SUPPORTED;
1955 			goto out;
1956 		}
1957 		break;
1958 	}
1959 	}
1960 
1961 	/* The second step uses len2 and offset2 of the dual_data */
1962 	next_req->kr_saveoffset = ct->dd_offset1;
1963 	next_req->kr_savelen = ct->dd_len1;
1964 	ct->dd_offset1 = ct->dd_offset2;
1965 	ct->dd_len1 = ct->dd_len2;
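	/* kcf_last_req() will restore the saved offset1/len1 on completion */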
1966 
1967 	/* Preserve only the caller's CRYPTO_RESTRICTED flag */
1968 	if (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED) {
1969 		areq->an_reqarg.cr_flag = CRYPTO_RESTRICTED;
1970 	} else {
1971 		areq->an_reqarg.cr_flag = 0;
1972 	}
1973 
1974 	areq->an_reqarg.cr_callback_func = kcf_last_req;
1975 	areq->an_reqarg.cr_callback_arg = next_req;
1976 	areq->an_isdual = B_TRUE;
1977 
1978 	/*
1979 	 * We would like to call kcf_submit_request() here. But,
1980 	 * We would like to call kcf_submit_request() here. But
1981 	 * kcf_areq_node_t request structure, while we need to
1982 	 * reuse the existing request structure.
1983 	 */
1984 	switch (pd->pd_prov_type) {
1985 	case CRYPTO_SW_PROVIDER:
1986 		error = common_submit_request(pd, NULL, params,
1987 		    KCF_RHNDL(KM_NOSLEEP));
1988 		break;
1989 
1990 	case CRYPTO_HW_PROVIDER: {
1991 		kcf_provider_desc_t *old_pd;
1992 		taskq_t *taskq = pd->pd_sched_info.ks_taskq;
1993 
1994 		/*
1995 		 * Set the params for the second step in the
1996 		 * dual-ops.
1997 		 */
1998 		areq->an_params = *params;
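
		/*
		 * Transfer the request's provider hold from the first-step
		 * provider to the one chosen for this step.
		 */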
1999 		old_pd = areq->an_provider;
2000 		KCF_PROV_REFRELE(old_pd);
2001 		KCF_PROV_REFHOLD(pd);
2002 		areq->an_provider = pd;
2003 
2004 		/*
2005 		 * Note that we have to do a taskq_dispatch()
2006 		 * here as we may be in interrupt context.
2007 		 */
2008 		if (taskq_dispatch(taskq, process_req_hwp, areq,
2009 		    TQ_NOSLEEP) == (taskqid_t)0) {
2010 			error = CRYPTO_HOST_MEMORY;
2011 		} else {
2012 			error = CRYPTO_QUEUED;
2013 		}
2014 		break;
2015 	}
2016 	}
2017 
2018 	/*
2019 	 * We have to release the holds on the request and the provider
2020 	 * in all cases.
2021 	 */
2022 	KCF_AREQ_REFRELE(areq);
2023 	KCF_PROV_REFRELE(pd);
2024 
2025 	if (error != CRYPTO_QUEUED) {
2026 		/* restore, clean up, and invoke the client's callback */
2027 
2028 		ct->dd_offset1 = next_req->kr_saveoffset;
2029 		ct->dd_len1 = next_req->kr_savelen;
2030 		areq->an_reqarg = next_req->kr_callreq;
2031 		kmem_free(next_req, sizeof (kcf_dual_req_t));
2032 		areq->an_isdual = B_FALSE;
2033 		kcf_aop_done(areq, error);
2034 	}
2035 }
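
/*
 * Note on the emulated dual-operation flow: the first step is submitted
 * (elsewhere in the framework) with kcf_next_req() as its completion
 * callback; kcf_next_req() then resubmits the same request with
 * kcf_last_req() as the callback, which restores the saved dual_data
 * fields and notifies the client.
 */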
2036 
2037 /*
2038  * Last part of an emulated dual operation.
2039  * Restores the saved dual_data offset and length, then notifies the client.
2040  */
2041 void
2042 kcf_last_req(void *last_req_arg, int status)
2043 {
2044 	kcf_dual_req_t *last_req = (kcf_dual_req_t *)last_req_arg;
2045 
2046 	kcf_req_params_t *params = &(last_req->kr_params);
2047 	kcf_areq_node_t *areq = last_req->kr_areq;
2048 	crypto_dual_data_t *ct;
2049 
2050 	switch (params->rp_opgrp) {
2051 	case KCF_OG_MAC: {
2052 		kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
2053 
2054 		ct = (crypto_dual_data_t *)mops->mo_data;
2055 		break;
2056 	}
2057 	case KCF_OG_DECRYPT: {
2058 		kcf_decrypt_ops_params_t *dcrops =
2059 		    &(params->rp_u.decrypt_params);
2060 
2061 		ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
2062 		break;
2063 	}
2064 	}
2065 	ct->dd_offset1 = last_req->kr_saveoffset;
2066 	ct->dd_len1 = last_req->kr_savelen;
2067 
2068 	/* The submitter used kcf_last_req as its callback */
2069 
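	/*
	 * No asynchronous request node is attached; invoke the saved
	 * client callback directly and free the saved arguments.
	 */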
2070 	if (areq == NULL) {
2071 		crypto_call_req_t *cr = &last_req->kr_callreq;
2072 
2073 		(*(cr->cr_callback_func))(cr->cr_callback_arg, status);
2074 		kmem_free(last_req, sizeof (kcf_dual_req_t));
2075 		return;
2076 	}
2077 	areq->an_reqarg = last_req->kr_callreq;
2078 	KCF_AREQ_REFRELE(areq);
2079 	kmem_free(last_req, sizeof (kcf_dual_req_t));
2080 	areq->an_isdual = B_FALSE;
2081 	kcf_aop_done(areq, status);
2082 }
2083