1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * This file contains the core framework routines for the
28  * kernel cryptographic framework. These routines are at the
29  * kernel cryptographic framework. These routines sit at the
30  * layer between the kernel API/ioctls and the SPI.
31 
32 #include <sys/types.h>
33 #include <sys/errno.h>
34 #include <sys/kmem.h>
35 #include <sys/proc.h>
36 #include <sys/cpuvar.h>
37 #include <sys/cpupart.h>
38 #include <sys/ksynch.h>
39 #include <sys/callb.h>
40 #include <sys/cmn_err.h>
41 #include <sys/systm.h>
42 #include <sys/sysmacros.h>
43 #include <sys/kstat.h>
44 #include <sys/crypto/common.h>
45 #include <sys/crypto/impl.h>
46 #include <sys/crypto/sched_impl.h>
47 #include <sys/crypto/api.h>
48 #include <sys/crypto/spi.h>
49 #include <sys/taskq_impl.h>
50 #include <sys/ddi.h>
51 #include <sys/sunddi.h>
52 
53 
54 kcf_global_swq_t *gswq;	/* Global software queue */
55 
56 /* Thread pool related variables */
57 static kcf_pool_t *kcfpool;	/* Thread pool of kcfd LWPs */
58 int kcf_maxthreads = 2;
59 int kcf_minthreads = 1;
60 int kcf_thr_multiple = 2;	/* Boot-time tunable for experimentation */
61 static ulong_t	kcf_idlethr_timeout;
62 static boolean_t kcf_sched_running = B_FALSE;
63 #define	KCF_DEFAULT_THRTIMEOUT	60000000	/* 60 seconds */
64 
65 /* kmem caches used by the scheduler */
66 static struct kmem_cache *kcf_sreq_cache;
67 static struct kmem_cache *kcf_areq_cache;
68 static struct kmem_cache *kcf_context_cache;
69 
70 /* Global request ID table */
71 static kcf_reqid_table_t *kcf_reqid_table[REQID_TABLES];
72 
73 /* KCF stats. Not protected. */
74 static kcf_stats_t kcf_ksdata = {
75 	{ "total threads in pool",	KSTAT_DATA_UINT32},
76 	{ "idle threads in pool",	KSTAT_DATA_UINT32},
77 	{ "min threads in pool",	KSTAT_DATA_UINT32},
78 	{ "max threads in pool",	KSTAT_DATA_UINT32},
79 	{ "requests in gswq",		KSTAT_DATA_UINT32},
80 	{ "max requests in gswq",	KSTAT_DATA_UINT32},
81 	{ "threads for HW taskq",	KSTAT_DATA_UINT32},
82 	{ "minalloc for HW taskq",	KSTAT_DATA_UINT32},
83 	{ "maxalloc for HW taskq",	KSTAT_DATA_UINT32}
84 };
85 
86 static kstat_t *kcf_misc_kstat = NULL;
87 ulong_t kcf_swprov_hndl = 0;
88 
89 static kcf_areq_node_t *kcf_areqnode_alloc(kcf_provider_desc_t *,
90     kcf_context_t *, crypto_call_req_t *, kcf_req_params_t *, boolean_t);
91 static int kcf_disp_sw_request(kcf_areq_node_t *);
92 static void process_req_hwp(void *);
93 static kcf_areq_node_t	*kcf_dequeue();
94 static int kcf_enqueue(kcf_areq_node_t *);
95 static void kcf_failover_thread();
96 static void kcfpool_alloc();
97 static void kcf_reqid_delete(kcf_areq_node_t *areq);
98 static crypto_req_id_t kcf_reqid_insert(kcf_areq_node_t *areq);
99 static int kcf_misc_kstat_update(kstat_t *ksp, int rw);
100 static void compute_min_max_threads();
101 
102 
103 /*
104  * Create a new context.
105  */
106 crypto_ctx_t *
107 kcf_new_ctx(crypto_call_req_t *crq, kcf_provider_desc_t *pd,
108     crypto_session_id_t sid)
109 {
110 	crypto_ctx_t *ctx;
111 	kcf_context_t *kcf_ctx;
112 
113 	kcf_ctx = kmem_cache_alloc(kcf_context_cache,
114 	    (crq == NULL) ? KM_SLEEP : KM_NOSLEEP);
115 	if (kcf_ctx == NULL)
116 		return (NULL);
117 
118 	/* initialize the context for the consumer */
119 	kcf_ctx->kc_refcnt = 1;
120 	kcf_ctx->kc_req_chain_first = NULL;
121 	kcf_ctx->kc_req_chain_last = NULL;
122 	kcf_ctx->kc_secondctx = NULL;
123 	KCF_PROV_REFHOLD(pd);
124 	kcf_ctx->kc_prov_desc = pd;
125 	kcf_ctx->kc_sw_prov_desc = NULL;
126 	kcf_ctx->kc_mech = NULL;
127 
128 	ctx = &kcf_ctx->kc_glbl_ctx;
129 	ctx->cc_provider = pd->pd_prov_handle;
130 	ctx->cc_session = sid;
131 	ctx->cc_provider_private = NULL;
132 	ctx->cc_framework_private = (void *)kcf_ctx;
133 	ctx->cc_flags = 0;
134 	ctx->cc_opstate = NULL;
135 
136 	return (ctx);
137 }
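
/*
 * Illustrative note (added; not in the original file): the public
 * crypto_ctx_t returned above is embedded in the framework-private
 * kcf_context_t, and cc_framework_private points back at the enclosing
 * structure. Framework code can therefore recover the kcf_context_t
 * from the handle it hands out (NULL check elided):
 *
 *	crypto_ctx_t *ctx = kcf_new_ctx(crq, pd, sid);
 *	kcf_context_t *kcf_ctx =
 *	    (kcf_context_t *)ctx->cc_framework_private;
 *	ASSERT(&kcf_ctx->kc_glbl_ctx == ctx);
 *
 * This is the same recovery done in kcf_submit_request() and
 * crypto_cancel_ctx() below.
 */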
138 
139 /*
140  * Allocate a new async request node.
141  *
142  * ictx - Framework private context pointer
143  * crq - Has callback function and argument. Must be non-NULL.
144  * req - The parameters to pass to the SPI
145  */
146 static kcf_areq_node_t *
147 kcf_areqnode_alloc(kcf_provider_desc_t *pd, kcf_context_t *ictx,
148     crypto_call_req_t *crq, kcf_req_params_t *req, boolean_t isdual)
149 {
150 	kcf_areq_node_t	*arptr, *areq;
151 
152 	ASSERT(crq != NULL);
153 	arptr = kmem_cache_alloc(kcf_areq_cache, KM_NOSLEEP);
154 	if (arptr == NULL)
155 		return (NULL);
156 
157 	arptr->an_state = REQ_ALLOCATED;
158 	arptr->an_reqarg = *crq;
159 	arptr->an_params = *req;
160 	arptr->an_context = ictx;
161 	arptr->an_isdual = isdual;
162 
163 	arptr->an_next = arptr->an_prev = NULL;
164 	KCF_PROV_REFHOLD(pd);
165 	arptr->an_provider = pd;
166 	arptr->an_tried_plist = NULL;
167 	arptr->an_refcnt = 1;
168 	arptr->an_idnext = arptr->an_idprev = NULL;
169 
170 	/*
171 	 * Requests for context-less operations do not use the
172 	 * an_is_my_turn and an_ctxchain_next fields.
173 	 */
174 	if (ictx == NULL)
175 		return (arptr);
176 
177 	KCF_CONTEXT_REFHOLD(ictx);
178 	/*
179 	 * Chain this request to the context.
180 	 */
181 	mutex_enter(&ictx->kc_in_use_lock);
182 	arptr->an_ctxchain_next = NULL;
183 	if ((areq = ictx->kc_req_chain_last) == NULL) {
184 		arptr->an_is_my_turn = B_TRUE;
185 		ictx->kc_req_chain_last =
186 		    ictx->kc_req_chain_first = arptr;
187 	} else {
188 		ASSERT(ictx->kc_req_chain_first != NULL);
189 		arptr->an_is_my_turn = B_FALSE;
190 		/* Insert the new request to the end of the chain. */
191 		areq->an_ctxchain_next = arptr;
192 		ictx->kc_req_chain_last = arptr;
193 	}
194 	mutex_exit(&ictx->kc_in_use_lock);
195 
196 	return (arptr);
197 }
198 
199 /*
200  * Queue the request node and do one of the following:
201  *	- If there is an idle thread signal it to run.
202  *	- If there is no idle thread and max running threads is not
203  *	  reached, signal the creator thread for more threads.
204  *
205  * If the two conditions above are not met, we don't need to do
206  * anything. The request will be picked up by one of the
207  * worker threads when one becomes available.
208  */
209 static int
210 kcf_disp_sw_request(kcf_areq_node_t *areq)
211 {
212 	int err;
213 	int cnt = 0;
214 
215 	if ((err = kcf_enqueue(areq)) != 0)
216 		return (err);
217 
218 	if (kcfpool->kp_idlethreads > 0) {
219 		/* Signal an idle thread to run */
220 		mutex_enter(&gswq->gs_lock);
221 		cv_signal(&gswq->gs_cv);
222 		mutex_exit(&gswq->gs_lock);
223 
224 		return (CRYPTO_QUEUED);
225 	}
226 
227 	/*
228 	 * We keep the number of running threads at
229 	 * kcf_minthreads to reduce gs_lock contention.
230 	 */
231 	cnt = kcf_minthreads -
232 	    (kcfpool->kp_threads - kcfpool->kp_blockedthreads);
233 	if (cnt > 0) {
234 		/*
235 		 * The following ensures the number of threads in pool
236 		 * does not exceed kcf_maxthreads.
237 		 */
238 		cnt = min(cnt, kcf_maxthreads - kcfpool->kp_threads);
239 		if (cnt > 0) {
240 			/* Signal the creator thread for more threads */
241 			mutex_enter(&kcfpool->kp_user_lock);
242 			if (!kcfpool->kp_signal_create_thread) {
243 				kcfpool->kp_signal_create_thread = B_TRUE;
244 				kcfpool->kp_nthrs = cnt;
245 				cv_signal(&kcfpool->kp_user_cv);
246 			}
247 			mutex_exit(&kcfpool->kp_user_lock);
248 		}
249 	}
250 
251 	return (CRYPTO_QUEUED);
252 }
253 
254 /*
255  * This routine is called by the taskq associated with
256  * each hardware provider. We notify the kernel consumer
257  * via the callback routine in case of CRYPTO_SUCCESS or
258  * a failure.
259  *
260  * A request can be of type kcf_areq_node_t or of type
261  * kcf_sreq_node_t.
262  */
263 static void
264 process_req_hwp(void *ireq)
265 {
266 	int error = 0;
267 	crypto_ctx_t *ctx;
268 	kcf_call_type_t ctype;
269 	kcf_provider_desc_t *pd;
270 	kcf_areq_node_t *areq = (kcf_areq_node_t *)ireq;
271 	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)ireq;
272 
273 	pd = ((ctype = GET_REQ_TYPE(ireq)) == CRYPTO_SYNCH) ?
274 	    sreq->sn_provider : areq->an_provider;
275 
276 	/*
277 	 * Wait if flow control is in effect for the provider. A
278 	 * CRYPTO_PROVIDER_READY or CRYPTO_PROVIDER_FAILED
279 	 * notification will signal us. We also get signaled if
280 	 * the provider is unregistering.
281 	 */
282 	if (pd->pd_state == KCF_PROV_BUSY) {
283 		mutex_enter(&pd->pd_lock);
284 		while (pd->pd_state == KCF_PROV_BUSY)
285 			cv_wait(&pd->pd_resume_cv, &pd->pd_lock);
286 		mutex_exit(&pd->pd_lock);
287 	}
288 
289 	/*
290 	 * Bump the internal reference count while the request is being
291 	 * processed. This is how we know when it's safe to unregister
292 	 * a provider. This step must precede the pd_state check below.
293 	 */
294 	KCF_PROV_IREFHOLD(pd);
295 
296 	/*
297 	 * Fail the request if the provider has failed. We return a
298 	 * recoverable error and the notified clients attempt any
299 	 * recovery. For async clients this is done in kcf_aop_done()
300 	 * and for sync clients it is done in the k-api routines.
301 	 */
302 	if (pd->pd_state >= KCF_PROV_FAILED) {
303 		error = CRYPTO_DEVICE_ERROR;
304 		goto bail;
305 	}
306 
307 	if (ctype == CRYPTO_SYNCH) {
308 		mutex_enter(&sreq->sn_lock);
309 		sreq->sn_state = REQ_INPROGRESS;
310 		mutex_exit(&sreq->sn_lock);
311 
312 		ctx = sreq->sn_context ? &sreq->sn_context->kc_glbl_ctx : NULL;
313 		error = common_submit_request(sreq->sn_provider, ctx,
314 		    sreq->sn_params, sreq);
315 	} else {
316 		kcf_context_t *ictx;
317 		ASSERT(ctype == CRYPTO_ASYNCH);
318 
319 		/*
320 		 * We are in the per-hardware provider thread context and
321 		 * hence can sleep. Note that the caller would have done
322 		 * a taskq_dispatch(..., TQ_NOSLEEP) and would have returned.
323 		 */
324 		ctx = (ictx = areq->an_context) ? &ictx->kc_glbl_ctx : NULL;
325 
326 		mutex_enter(&areq->an_lock);
327 		/*
328 		 * We need to maintain ordering for multi-part requests.
329 		 * an_is_my_turn is set to B_TRUE initially for a request
330 		 * when it is enqueued and there are no other requests
331 		 * for that context. It is set later from kcf_aop_done() when
332 		 * the request before us in the chain of requests for the
333 		 * context completes. We get signaled at that point.
334 		 */
335 		if (ictx != NULL) {
336 			ASSERT(ictx->kc_prov_desc == areq->an_provider);
337 
338 			while (areq->an_is_my_turn == B_FALSE) {
339 				cv_wait(&areq->an_turn_cv, &areq->an_lock);
340 			}
341 		}
342 		areq->an_state = REQ_INPROGRESS;
343 		mutex_exit(&areq->an_lock);
344 
345 		error = common_submit_request(areq->an_provider, ctx,
346 		    &areq->an_params, areq);
347 	}
348 
349 bail:
350 	if (error == CRYPTO_QUEUED) {
351 		/*
352 		 * The request is queued by the provider and we should
353 		 * get a crypto_op_notification() from the provider later.
354 		 * We notify the consumer at that time.
355 		 */
356 		return;
357 	} else {		/* CRYPTO_SUCCESS or other failure */
358 		KCF_PROV_IREFRELE(pd);
359 		if (ctype == CRYPTO_SYNCH)
360 			kcf_sop_done(sreq, error);
361 		else
362 			kcf_aop_done(areq, error);
363 	}
364 }
365 
366 /*
367  * This routine checks if a request can be retried on another
368  * provider. If true, mech1 is initialized to point to the mechanism
369  * structure. mech2 is also initialized in case of a dual operation. fg
370  * is initialized to the correct crypto_func_group_t bit flag. They are
371  * initialized by this routine, so that the caller can pass them to a
372  * kcf_get_mech_provider() or kcf_get_dual_provider() with no further change.
373  *
374  * We check that the request is for an init or atomic routine and that
375  * it is for one of the operation groups used from the k-api.
376  */
377 static boolean_t
378 can_resubmit(kcf_areq_node_t *areq, crypto_mechanism_t **mech1,
379     crypto_mechanism_t **mech2, crypto_func_group_t *fg)
380 {
381 	kcf_req_params_t *params;
382 	kcf_op_type_t optype;
383 
384 	params = &areq->an_params;
385 	optype = params->rp_optype;
386 
387 	if (!(IS_INIT_OP(optype) || IS_ATOMIC_OP(optype)))
388 		return (B_FALSE);
389 
390 	switch (params->rp_opgrp) {
391 	case KCF_OG_DIGEST: {
392 		kcf_digest_ops_params_t *dops = &params->rp_u.digest_params;
393 
394 		dops->do_mech.cm_type = dops->do_framework_mechtype;
395 		*mech1 = &dops->do_mech;
396 		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DIGEST :
397 		    CRYPTO_FG_DIGEST_ATOMIC;
398 		break;
399 	}
400 
401 	case KCF_OG_MAC: {
402 		kcf_mac_ops_params_t *mops = &params->rp_u.mac_params;
403 
404 		mops->mo_mech.cm_type = mops->mo_framework_mechtype;
405 		*mech1 = &mops->mo_mech;
406 		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC :
407 		    CRYPTO_FG_MAC_ATOMIC;
408 		break;
409 	}
410 
411 	case KCF_OG_SIGN: {
412 		kcf_sign_ops_params_t *sops = &params->rp_u.sign_params;
413 
414 		sops->so_mech.cm_type = sops->so_framework_mechtype;
415 		*mech1 = &sops->so_mech;
416 		switch (optype) {
417 		case KCF_OP_INIT:
418 			*fg = CRYPTO_FG_SIGN;
419 			break;
420 		case KCF_OP_ATOMIC:
421 			*fg = CRYPTO_FG_SIGN_ATOMIC;
422 			break;
423 		default:
424 			ASSERT(optype == KCF_OP_SIGN_RECOVER_ATOMIC);
425 			*fg = CRYPTO_FG_SIGN_RECOVER_ATOMIC;
426 		}
427 		break;
428 	}
429 
430 	case KCF_OG_VERIFY: {
431 		kcf_verify_ops_params_t *vops = &params->rp_u.verify_params;
432 
433 		vops->vo_mech.cm_type = vops->vo_framework_mechtype;
434 		*mech1 = &vops->vo_mech;
435 		switch (optype) {
436 		case KCF_OP_INIT:
437 			*fg = CRYPTO_FG_VERIFY;
438 			break;
439 		case KCF_OP_ATOMIC:
440 			*fg = CRYPTO_FG_VERIFY_ATOMIC;
441 			break;
442 		default:
443 			ASSERT(optype == KCF_OP_VERIFY_RECOVER_ATOMIC);
444 			*fg = CRYPTO_FG_VERIFY_RECOVER_ATOMIC;
445 		}
446 		break;
447 	}
448 
449 	case KCF_OG_ENCRYPT: {
450 		kcf_encrypt_ops_params_t *eops = &params->rp_u.encrypt_params;
451 
452 		eops->eo_mech.cm_type = eops->eo_framework_mechtype;
453 		*mech1 = &eops->eo_mech;
454 		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT :
455 		    CRYPTO_FG_ENCRYPT_ATOMIC;
456 		break;
457 	}
458 
459 	case KCF_OG_DECRYPT: {
460 		kcf_decrypt_ops_params_t *dcrops = &params->rp_u.decrypt_params;
461 
462 		dcrops->dop_mech.cm_type = dcrops->dop_framework_mechtype;
463 		*mech1 = &dcrops->dop_mech;
464 		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DECRYPT :
465 		    CRYPTO_FG_DECRYPT_ATOMIC;
466 		break;
467 	}
468 
469 	case KCF_OG_ENCRYPT_MAC: {
470 		kcf_encrypt_mac_ops_params_t *eops =
471 		    &params->rp_u.encrypt_mac_params;
472 
473 		eops->em_encr_mech.cm_type = eops->em_framework_encr_mechtype;
474 		*mech1 = &eops->em_encr_mech;
475 		eops->em_mac_mech.cm_type = eops->em_framework_mac_mechtype;
476 		*mech2 = &eops->em_mac_mech;
477 		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT_MAC :
478 		    CRYPTO_FG_ENCRYPT_MAC_ATOMIC;
479 		break;
480 	}
481 
482 	case KCF_OG_MAC_DECRYPT: {
483 		kcf_mac_decrypt_ops_params_t *dops =
484 		    &params->rp_u.mac_decrypt_params;
485 
486 		dops->md_mac_mech.cm_type = dops->md_framework_mac_mechtype;
487 		*mech1 = &dops->md_mac_mech;
488 		dops->md_decr_mech.cm_type = dops->md_framework_decr_mechtype;
489 		*mech2 = &dops->md_decr_mech;
490 		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC_DECRYPT :
491 		    CRYPTO_FG_MAC_DECRYPT_ATOMIC;
492 		break;
493 	}
494 
495 	default:
496 		return (B_FALSE);
497 	}
498 
499 	return (B_TRUE);
500 }
501 
502 /*
503  * This routine is called when a request to a provider has failed
504  * with a recoverable error. This routine tries to find another provider
505  * and dispatches the request to the new provider, if one is available.
506  * We reuse the request structure.
507  *
508  * A return value of NULL from kcf_get_mech_provider() indicates
509  * we have tried the last provider.
510  */
511 static int
512 kcf_resubmit_request(kcf_areq_node_t *areq)
513 {
514 	int error = CRYPTO_FAILED;
515 	kcf_context_t *ictx;
516 	kcf_provider_desc_t *old_pd;
517 	kcf_provider_desc_t *new_pd;
518 	crypto_mechanism_t *mech1 = NULL, *mech2 = NULL;
519 	crypto_mech_type_t prov_mt1, prov_mt2;
520 	crypto_func_group_t fg;
521 
522 	if (!can_resubmit(areq, &mech1, &mech2, &fg))
523 		return (error);
524 
525 	old_pd = areq->an_provider;
526 	/*
527 	 * Add old_pd to the list of providers already tried. We release
528 	 * the hold on old_pd (from the earlier kcf_get_mech_provider()) in
529 	 * kcf_free_triedlist().
530 	 */
531 	if (kcf_insert_triedlist(&areq->an_tried_plist, old_pd,
532 	    KM_NOSLEEP) == NULL)
533 		return (error);
534 
535 	if (mech1 && !mech2) {
536 		new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, &error,
537 		    areq->an_tried_plist, fg,
538 		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
539 	} else {
540 		ASSERT(mech1 != NULL && mech2 != NULL);
541 
542 		new_pd = kcf_get_dual_provider(mech1, mech2, NULL, &prov_mt1,
543 		    &prov_mt2, &error, areq->an_tried_plist, fg, fg,
544 		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
545 	}
546 
547 	if (new_pd == NULL)
548 		return (error);
549 
550 	/*
551 	 * We reuse the old context by resetting provider specific
552 	 * fields in it.
553 	 */
554 	if ((ictx = areq->an_context) != NULL) {
555 		crypto_ctx_t *ctx;
556 
557 		ASSERT(old_pd == ictx->kc_prov_desc);
558 		KCF_PROV_REFRELE(ictx->kc_prov_desc);
559 		KCF_PROV_REFHOLD(new_pd);
560 		ictx->kc_prov_desc = new_pd;
561 
562 		ctx = &ictx->kc_glbl_ctx;
563 		ctx->cc_provider = new_pd->pd_prov_handle;
564 		ctx->cc_session = new_pd->pd_sid;
565 		ctx->cc_provider_private = NULL;
566 	}
567 
568 	/* We reuse areq by resetting the provider and context fields. */
569 	KCF_PROV_REFRELE(old_pd);
570 	KCF_PROV_REFHOLD(new_pd);
571 	areq->an_provider = new_pd;
572 	mutex_enter(&areq->an_lock);
573 	areq->an_state = REQ_WAITING;
574 	mutex_exit(&areq->an_lock);
575 
576 	switch (new_pd->pd_prov_type) {
577 	case CRYPTO_SW_PROVIDER:
578 		error = kcf_disp_sw_request(areq);
579 		break;
580 
581 	case CRYPTO_HW_PROVIDER: {
582 		taskq_t *taskq = new_pd->pd_sched_info.ks_taskq;
583 
584 		if (taskq_dispatch(taskq, process_req_hwp, areq, TQ_NOSLEEP) ==
585 		    (taskqid_t)0) {
586 			error = CRYPTO_HOST_MEMORY;
587 		} else {
588 			error = CRYPTO_QUEUED;
589 		}
590 
591 		break;
592 	}
593 	}
594 
595 	return (error);
596 }
597 
598 #define	EMPTY_TASKQ(tq)	((tq)->tq_task.tqent_next == &(tq)->tq_task)
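
/*
 * Explanatory note (added): tq_task is the sentinel head of the taskq's
 * circular list of pending entries, so the list is empty exactly when
 * tqent_next points back at the head itself. EMPTY_TASKQ() is an
 * unlocked peek used below to decide whether a synchronous request can
 * bypass the taskq and call the SPI directly.
 */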
599 
600 /*
601  * Routine called by both ioctl and k-api. The consumer should
602  * bundle the parameters into a kcf_req_params_t structure. A bunch
603  * of macros are available in ops_impl.h for this bundling. They are:
604  *
605  * 	KCF_WRAP_DIGEST_OPS_PARAMS()
606  *	KCF_WRAP_MAC_OPS_PARAMS()
607  *	KCF_WRAP_ENCRYPT_OPS_PARAMS()
608  *	KCF_WRAP_DECRYPT_OPS_PARAMS() ... etc.
609  *
610  * It is the caller's responsibility to free the ctx argument when
611  * appropriate. See the KCF_CONTEXT_COND_RELEASE macro for details.
612  */
613 int
614 kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
615     crypto_call_req_t *crq, kcf_req_params_t *params, boolean_t cont)
616 {
617 	int error = CRYPTO_SUCCESS;
618 	kcf_areq_node_t *areq;
619 	kcf_sreq_node_t *sreq;
620 	kcf_context_t *kcf_ctx;
621 	taskq_t *taskq = pd->pd_sched_info.ks_taskq;
622 
623 	kcf_ctx = ctx ? (kcf_context_t *)ctx->cc_framework_private : NULL;
624 
625 	/* Synchronous cases */
626 	if (crq == NULL) {
627 		switch (pd->pd_prov_type) {
628 		case CRYPTO_SW_PROVIDER:
629 			error = common_submit_request(pd, ctx, params,
630 			    KCF_RHNDL(KM_SLEEP));
631 			break;
632 
633 		case CRYPTO_HW_PROVIDER:
634 			/*
635 			 * Special case for CRYPTO_SYNCHRONOUS providers that
636 			 * never return a CRYPTO_QUEUED error. We skip any
637 			 * request allocation and call the SPI directly.
638 			 */
639 			if ((pd->pd_flags & CRYPTO_SYNCHRONOUS) &&
640 			    EMPTY_TASKQ(taskq)) {
641 				KCF_PROV_IREFHOLD(pd);
642 				if (pd->pd_state == KCF_PROV_READY) {
643 					error = common_submit_request(pd, ctx,
644 					    params, KCF_RHNDL(KM_SLEEP));
645 					KCF_PROV_IREFRELE(pd);
646 					ASSERT(error != CRYPTO_QUEUED);
647 					break;
648 				}
649 				KCF_PROV_IREFRELE(pd);
650 			}
651 
652 			sreq = kmem_cache_alloc(kcf_sreq_cache, KM_SLEEP);
653 			sreq->sn_state = REQ_ALLOCATED;
654 			sreq->sn_rv = CRYPTO_FAILED;
655 			sreq->sn_params = params;
656 
657 			/*
658 			 * Note that we do not need to hold the context
659 			 * for the synchronous case as the context will never
660 			 * become invalid underneath us. We do not need to hold
661 			 * the provider here either as the caller has a hold.
662 			 */
663 			sreq->sn_context = kcf_ctx;
664 			ASSERT(KCF_PROV_REFHELD(pd));
665 			sreq->sn_provider = pd;
666 
667 			ASSERT(taskq != NULL);
668 			/*
669 			 * Call the SPI directly if the taskq is empty and the
670 			 * provider is not busy, else dispatch to the taskq.
671 			 * Calling directly is fine as this is the synchronous
672 			 * case. This is unlike the asynchronous case where we
673 			 * must always dispatch to the taskq.
674 			 */
675 			if (EMPTY_TASKQ(taskq) &&
676 			    pd->pd_state == KCF_PROV_READY) {
677 				process_req_hwp(sreq);
678 			} else {
679 				/*
680 				 * We cannot tell from taskq_dispatch() return
681 				 * value whether we exceeded maxalloc. Hence the
682 				 * check here. Since we are allowed to wait in
683 				 * the synchronous case, we wait for the taskq
684 				 * to become empty.
685 				 */
686 				if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
687 					taskq_wait(taskq);
688 				}
689 
690 				(void) taskq_dispatch(taskq, process_req_hwp,
691 				    sreq, TQ_SLEEP);
692 			}
693 
694 			/*
695 			 * Wait for the notification to arrive,
696 			 * if the operation is not done yet.
697 			 * Bug# 4722589 will make the wait a cv_wait_sig().
698 			 */
699 			mutex_enter(&sreq->sn_lock);
700 			while (sreq->sn_state < REQ_DONE)
701 				cv_wait(&sreq->sn_cv, &sreq->sn_lock);
702 			mutex_exit(&sreq->sn_lock);
703 
704 			error = sreq->sn_rv;
705 			kmem_cache_free(kcf_sreq_cache, sreq);
706 
707 			break;
708 
709 		default:
710 			error = CRYPTO_FAILED;
711 			break;
712 		}
713 
714 	} else {	/* Asynchronous cases */
715 		switch (pd->pd_prov_type) {
716 		case CRYPTO_SW_PROVIDER:
717 			if (!(crq->cr_flag & CRYPTO_ALWAYS_QUEUE)) {
718 				/*
719 				 * This case has less overhead since there is
720 				 * no switching of context.
721 				 */
722 				error = common_submit_request(pd, ctx, params,
723 				    KCF_RHNDL(KM_NOSLEEP));
724 			} else {
725 				/*
726 				 * CRYPTO_ALWAYS_QUEUE is set. We need to
727 				 * queue the request and return.
728 				 */
729 				areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
730 				    params, cont);
731 				if (areq == NULL)
732 					error = CRYPTO_HOST_MEMORY;
733 				else {
734 					if (!(crq->cr_flag
735 					    & CRYPTO_SKIP_REQID)) {
736 					/*
737 					 * Set the request handle. This handle
738 					 * is used for any crypto_cancel_req(9f)
739 					 * calls from the consumer. We have to
740 					 * do this before dispatching the
741 					 * request.
742 					 */
743 					crq->cr_reqid = kcf_reqid_insert(areq);
744 					}
745 
746 					error = kcf_disp_sw_request(areq);
747 					/*
748 					 * There is an error processing this
749 					 * request. Remove the handle and
750 					 * release the request structure.
751 					 */
752 					if (error != CRYPTO_QUEUED) {
753 						if (!(crq->cr_flag
754 						    & CRYPTO_SKIP_REQID))
755 							kcf_reqid_delete(areq);
756 						KCF_AREQ_REFRELE(areq);
757 					}
758 				}
759 			}
760 			break;
761 
762 		case CRYPTO_HW_PROVIDER:
763 			/*
764 			 * We need to queue the request and return.
765 			 */
766 			areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params,
767 			    cont);
768 			if (areq == NULL) {
769 				error = CRYPTO_HOST_MEMORY;
770 				goto done;
771 			}
772 
773 			ASSERT(taskq != NULL);
774 			/*
775 			 * We cannot tell from taskq_dispatch() return
776 			 * value whether we exceeded maxalloc. Hence the check
777 			 * here.
778 			 */
779 			if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
780 				error = CRYPTO_BUSY;
781 				KCF_AREQ_REFRELE(areq);
782 				goto done;
783 			}
784 
785 			if (!(crq->cr_flag & CRYPTO_SKIP_REQID)) {
786 			/*
787 			 * Set the request handle. This handle is used
788 			 * for any crypto_cancel_req(9f) calls from the
789 			 * consumer. We have to do this before dispatching
790 			 * the request.
791 			 */
792 			crq->cr_reqid = kcf_reqid_insert(areq);
793 			}
794 
795 			if (taskq_dispatch(taskq,
796 			    process_req_hwp, areq, TQ_NOSLEEP) ==
797 			    (taskqid_t)0) {
798 				error = CRYPTO_HOST_MEMORY;
799 				if (!(crq->cr_flag & CRYPTO_SKIP_REQID))
800 					kcf_reqid_delete(areq);
801 				KCF_AREQ_REFRELE(areq);
802 			} else {
803 				error = CRYPTO_QUEUED;
804 			}
805 			break;
806 
807 		default:
808 			error = CRYPTO_FAILED;
809 			break;
810 		}
811 	}
812 
813 done:
814 	return (error);
815 }
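
/*
 * Usage sketch (added; illustrative only): a k-api routine typically
 * wraps its arguments with one of the KCF_WRAP_*_OPS_PARAMS() macros
 * from ops_impl.h and then calls kcf_submit_request(). The macro
 * arguments are elided below; see ops_impl.h for the real signatures.
 *
 *	kcf_req_params_t params;
 *	int rv;
 *
 *	KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC, ...);
 *	rv = kcf_submit_request(pd, NULL, crq, &params, B_FALSE);
 *
 * With crq == NULL the call is synchronous and rv is the operation
 * status. With a non-NULL crq the call is asynchronous and normally
 * returns CRYPTO_QUEUED; the result is delivered later through the
 * callback in crq (see kcf_aop_done()).
 */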
816 
817 /*
818  * We're done with this framework context, so free it. Note that freeing
819  * framework context (kcf_context) frees the global context (crypto_ctx).
820  *
821  * The provider is responsible for freeing provider private context after a
822  * final or single operation and resetting the cc_provider_private field
823  * to NULL. It should do this before it notifies the framework of the
824  * completion. We still need to call KCF_PROV_FREE_CONTEXT to handle cases
825  * like crypto_cancel_ctx(9f).
826  */
827 void
828 kcf_free_context(kcf_context_t *kcf_ctx)
829 {
830 	kcf_provider_desc_t *pd = kcf_ctx->kc_prov_desc;
831 	crypto_ctx_t *gctx = &kcf_ctx->kc_glbl_ctx;
832 	kcf_context_t *kcf_secondctx = kcf_ctx->kc_secondctx;
833 
834 	/* Release the second context, if any */
835 
836 	if (kcf_secondctx != NULL)
837 		KCF_CONTEXT_REFRELE(kcf_secondctx);
838 
839 	if (gctx->cc_provider_private != NULL) {
840 		mutex_enter(&pd->pd_lock);
841 		if (!KCF_IS_PROV_REMOVED(pd)) {
842 			/*
843 			 * Increment the provider's internal refcnt so it
844 			 * doesn't unregister from the framework while
845 			 * we're calling the entry point.
846 			 */
847 			KCF_PROV_IREFHOLD(pd);
848 			mutex_exit(&pd->pd_lock);
849 			(void) KCF_PROV_FREE_CONTEXT(pd, gctx);
850 			KCF_PROV_IREFRELE(pd);
851 		} else {
852 			mutex_exit(&pd->pd_lock);
853 		}
854 	}
855 
856 	/* kcf_ctx->kc_prov_desc has a hold on pd */
857 	KCF_PROV_REFRELE(kcf_ctx->kc_prov_desc);
858 
859 	/* check if this context is shared with a software provider */
860 	if ((gctx->cc_flags & CRYPTO_INIT_OPSTATE) &&
861 	    kcf_ctx->kc_sw_prov_desc != NULL) {
862 		KCF_PROV_REFRELE(kcf_ctx->kc_sw_prov_desc);
863 	}
864 
865 	kmem_cache_free(kcf_context_cache, kcf_ctx);
866 }
867 
868 /*
869  * Free the request after releasing all the holds.
870  */
871 void
872 kcf_free_req(kcf_areq_node_t *areq)
873 {
874 	KCF_PROV_REFRELE(areq->an_provider);
875 	if (areq->an_context != NULL)
876 		KCF_CONTEXT_REFRELE(areq->an_context);
877 
878 	if (areq->an_tried_plist != NULL)
879 		kcf_free_triedlist(areq->an_tried_plist);
880 	kmem_cache_free(kcf_areq_cache, areq);
881 }
882 
883 /*
884  * Utility routine to remove a request from the chain of requests
885  * hanging off a context.
886  */
887 void
888 kcf_removereq_in_ctxchain(kcf_context_t *ictx, kcf_areq_node_t *areq)
889 {
890 	kcf_areq_node_t *cur, *prev;
891 
892 	/*
893 	 * Get context lock, search for areq in the chain and remove it.
894 	 */
895 	ASSERT(ictx != NULL);
896 	mutex_enter(&ictx->kc_in_use_lock);
897 	prev = cur = ictx->kc_req_chain_first;
898 
899 	while (cur != NULL) {
900 		if (cur == areq) {
901 			if (prev == cur) {
902 				if ((ictx->kc_req_chain_first =
903 				    cur->an_ctxchain_next) == NULL)
904 					ictx->kc_req_chain_last = NULL;
905 			} else {
906 				if (cur == ictx->kc_req_chain_last)
907 					ictx->kc_req_chain_last = prev;
908 				prev->an_ctxchain_next = cur->an_ctxchain_next;
909 			}
910 
911 			break;
912 		}
913 		prev = cur;
914 		cur = cur->an_ctxchain_next;
915 	}
916 	mutex_exit(&ictx->kc_in_use_lock);
917 }
918 
919 /*
920  * Remove the specified node from the global software queue.
921  *
922  * The caller must hold the queue lock and request lock (an_lock).
923  */
924 void
925 kcf_remove_node(kcf_areq_node_t *node)
926 {
927 	kcf_areq_node_t *nextp = node->an_next;
928 	kcf_areq_node_t *prevp = node->an_prev;
929 
930 	ASSERT(mutex_owned(&gswq->gs_lock));
931 
932 	if (nextp != NULL)
933 		nextp->an_prev = prevp;
934 	else
935 		gswq->gs_last = prevp;
936 
937 	if (prevp != NULL)
938 		prevp->an_next = nextp;
939 	else
940 		gswq->gs_first = nextp;
941 
942 	ASSERT(mutex_owned(&node->an_lock));
943 	node->an_state = REQ_CANCELED;
944 }
945 
946 /*
947  * Remove and return the first node in the global software queue.
948  *
949  * The caller must hold the queue lock.
950  */
951 static kcf_areq_node_t *
952 kcf_dequeue()
953 {
954 	kcf_areq_node_t *tnode = NULL;
955 
956 	ASSERT(mutex_owned(&gswq->gs_lock));
957 	if ((tnode = gswq->gs_first) == NULL) {
958 		return (NULL);
959 	} else {
960 		ASSERT(gswq->gs_first->an_prev == NULL);
961 		gswq->gs_first = tnode->an_next;
962 		if (tnode->an_next == NULL)
963 			gswq->gs_last = NULL;
964 		else
965 			tnode->an_next->an_prev = NULL;
966 	}
967 
968 	gswq->gs_njobs--;
969 	return (tnode);
970 }
971 
972 /*
973  * Add the request node to the end of the global software queue.
974  *
975  * The caller should not hold the queue lock. Returns 0 if the
976  * request is successfully queued. Returns CRYPTO_BUSY if the limit
977  * on the number of jobs is exceeded.
978  */
979 static int
980 kcf_enqueue(kcf_areq_node_t *node)
981 {
982 	kcf_areq_node_t *tnode;
983 
984 	mutex_enter(&gswq->gs_lock);
985 
986 	if (gswq->gs_njobs >= gswq->gs_maxjobs) {
987 		mutex_exit(&gswq->gs_lock);
988 		return (CRYPTO_BUSY);
989 	}
990 
991 	if (gswq->gs_last == NULL) {
992 		gswq->gs_first = gswq->gs_last = node;
993 	} else {
994 		ASSERT(gswq->gs_last->an_next == NULL);
995 		tnode = gswq->gs_last;
996 		tnode->an_next = node;
997 		gswq->gs_last = node;
998 		node->an_prev = tnode;
999 	}
1000 
1001 	gswq->gs_njobs++;
1002 
1003 	/* an_lock not needed here as we hold gs_lock */
1004 	node->an_state = REQ_WAITING;
1005 
1006 	mutex_exit(&gswq->gs_lock);
1007 
1008 	return (0);
1009 }
1010 
1011 /*
1012  * Decrement the thread pool count and signal the failover
1013  * thread if we are the last one out.
1014  */
1015 static void
1016 kcf_decrcnt_andsignal()
1017 {
1018 	KCF_ATOMIC_DECR(kcfpool->kp_threads);
1019 
1020 	mutex_enter(&kcfpool->kp_thread_lock);
1021 	if (kcfpool->kp_threads == 0)
1022 		cv_signal(&kcfpool->kp_nothr_cv);
1023 	mutex_exit(&kcfpool->kp_thread_lock);
1024 }
1025 
1026 /*
1027  * Function run by a thread from kcfpool to work on global software queue.
1028  * It is called from ioctl(CRYPTO_POOL_RUN, ...).
1029  */
1030 int
1031 kcf_svc_do_run(void)
1032 {
1033 	int error = 0;
1034 	clock_t rv;
1035 	clock_t timeout_val;
1036 	kcf_areq_node_t *req;
1037 	kcf_context_t *ictx;
1038 	kcf_provider_desc_t *pd;
1039 
1040 	KCF_ATOMIC_INCR(kcfpool->kp_threads);
1041 
1042 	for (;;) {
1043 		mutex_enter(&gswq->gs_lock);
1044 
1045 		while ((req = kcf_dequeue()) == NULL) {
1046 			timeout_val = ddi_get_lbolt() +
1047 			    drv_usectohz(kcf_idlethr_timeout);
1048 
1049 			KCF_ATOMIC_INCR(kcfpool->kp_idlethreads);
1050 			rv = cv_timedwait_sig(&gswq->gs_cv, &gswq->gs_lock,
1051 			    timeout_val);
1052 			KCF_ATOMIC_DECR(kcfpool->kp_idlethreads);
1053 
1054 			switch (rv) {
1055 			case 0:
1056 				/*
1057 				 * A signal (as in kill(2)) is pending. We did
1058 				 * not get any cv_signal().
1059 				 */
1060 				kcf_decrcnt_andsignal();
1061 				mutex_exit(&gswq->gs_lock);
1062 				return (EINTR);
1063 
1064 			case -1:
1065 				/*
1066 				 * Timed out and we are not signaled. Let us
1067 				 * see if this thread should exit. We should
1068 				 * keep at least kcf_minthreads.
1069 				 */
1070 				if (kcfpool->kp_threads > kcf_minthreads) {
1071 					kcf_decrcnt_andsignal();
1072 					mutex_exit(&gswq->gs_lock);
1073 					return (0);
1074 				}
1075 
1076 				/* Resume the wait for work */
1077 				break;
1078 
1079 			default:
1080 				/*
1081 				 * We are signaled to work on the queue.
1082 				 */
1083 				break;
1084 			}
1085 		}
1086 
1087 		mutex_exit(&gswq->gs_lock);
1088 
1089 		ictx = req->an_context;
1090 		if (ictx == NULL) {	/* Context-less operation */
1091 			pd = req->an_provider;
1092 			error = common_submit_request(pd, NULL,
1093 			    &req->an_params, req);
1094 			kcf_aop_done(req, error);
1095 			continue;
1096 		}
1097 
1098 		/*
1099 		 * We check if we can work on the request now.
1100 		 * Solaris does not guarantee any order on how the threads
1101 		 * are scheduled or how the waiters on a mutex are chosen.
1102 		 * So, we need to maintain our own order.
1103 		 *
1104 		 * is_my_turn is set to B_TRUE initially for a request when
1105 		 * it is enqueued and there are no other requests
1106 		 * for that context.  Note that a thread sleeping on
1107 		 * an_turn_cv is not counted as an idle thread. This is
1108 		 * because we define an idle thread as one that sleeps on the
1109 		 * global queue waiting for new requests.
1110 		 */
1111 		mutex_enter(&req->an_lock);
1112 		while (req->an_is_my_turn == B_FALSE) {
1113 			KCF_ATOMIC_INCR(kcfpool->kp_blockedthreads);
1114 			cv_wait(&req->an_turn_cv, &req->an_lock);
1115 			KCF_ATOMIC_DECR(kcfpool->kp_blockedthreads);
1116 		}
1117 
1118 		req->an_state = REQ_INPROGRESS;
1119 		mutex_exit(&req->an_lock);
1120 
1121 		pd = ictx->kc_prov_desc;
1122 		ASSERT(pd == req->an_provider);
1123 		error = common_submit_request(pd, &ictx->kc_glbl_ctx,
1124 		    &req->an_params, req);
1125 
1126 		kcf_aop_done(req, error);
1127 	}
1128 }
1129 
1130 /*
1131  * kmem_cache_alloc constructor for sync request structure.
1132  */
1133 /* ARGSUSED */
1134 static int
1135 kcf_sreq_cache_constructor(void *buf, void *cdrarg, int kmflags)
1136 {
1137 	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;
1138 
1139 	sreq->sn_type = CRYPTO_SYNCH;
1140 	cv_init(&sreq->sn_cv, NULL, CV_DEFAULT, NULL);
1141 	mutex_init(&sreq->sn_lock, NULL, MUTEX_DEFAULT, NULL);
1142 
1143 	return (0);
1144 }
1145 
1146 /* ARGSUSED */
1147 static void
1148 kcf_sreq_cache_destructor(void *buf, void *cdrarg)
1149 {
1150 	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;
1151 
1152 	mutex_destroy(&sreq->sn_lock);
1153 	cv_destroy(&sreq->sn_cv);
1154 }
1155 
1156 /*
1157  * kmem_cache_alloc constructor for async request structure.
1158  */
1159 /* ARGSUSED */
1160 static int
1161 kcf_areq_cache_constructor(void *buf, void *cdrarg, int kmflags)
1162 {
1163 	kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;
1164 
1165 	areq->an_type = CRYPTO_ASYNCH;
1166 	areq->an_refcnt = 0;
1167 	mutex_init(&areq->an_lock, NULL, MUTEX_DEFAULT, NULL);
1168 	cv_init(&areq->an_done, NULL, CV_DEFAULT, NULL);
1169 	cv_init(&areq->an_turn_cv, NULL, CV_DEFAULT, NULL);
1170 
1171 	return (0);
1172 }
1173 
1174 /* ARGSUSED */
1175 static void
1176 kcf_areq_cache_destructor(void *buf, void *cdrarg)
1177 {
1178 	kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;
1179 
1180 	ASSERT(areq->an_refcnt == 0);
1181 	mutex_destroy(&areq->an_lock);
1182 	cv_destroy(&areq->an_done);
1183 	cv_destroy(&areq->an_turn_cv);
1184 }
1185 
1186 /*
1187  * kmem_cache_alloc constructor for kcf_context structure.
1188  */
1189 /* ARGSUSED */
1190 static int
1191 kcf_context_cache_constructor(void *buf, void *cdrarg, int kmflags)
1192 {
1193 	kcf_context_t *kctx = (kcf_context_t *)buf;
1194 
1195 	kctx->kc_refcnt = 0;
1196 	mutex_init(&kctx->kc_in_use_lock, NULL, MUTEX_DEFAULT, NULL);
1197 
1198 	return (0);
1199 }
1200 
1201 /* ARGSUSED */
1202 static void
1203 kcf_context_cache_destructor(void *buf, void *cdrarg)
1204 {
1205 	kcf_context_t *kctx = (kcf_context_t *)buf;
1206 
1207 	ASSERT(kctx->kc_refcnt == 0);
1208 	mutex_destroy(&kctx->kc_in_use_lock);
1209 }
1210 
1211 /*
1212  * Creates and initializes all the structures needed by the framework.
1213  */
1214 void
1215 kcf_sched_init(void)
1216 {
1217 	int i;
1218 	kcf_reqid_table_t *rt;
1219 
1220 	/*
1221 	 * Create all the kmem caches needed by the framework. We set the
1222 	 * align argument to 64 to get a 64-byte aligned slab and to make
1223 	 * the object size (cache_chunksize) a multiple of 64 bytes.
1224 	 * This helps to avoid false sharing, as 64 bytes is the size of
1225 	 * the CPU cache line.
1226 	 */
1227 	kcf_sreq_cache = kmem_cache_create("kcf_sreq_cache",
1228 	    sizeof (struct kcf_sreq_node), 64, kcf_sreq_cache_constructor,
1229 	    kcf_sreq_cache_destructor, NULL, NULL, NULL, 0);
1230 
1231 	kcf_areq_cache = kmem_cache_create("kcf_areq_cache",
1232 	    sizeof (struct kcf_areq_node), 64, kcf_areq_cache_constructor,
1233 	    kcf_areq_cache_destructor, NULL, NULL, NULL, 0);
1234 
1235 	kcf_context_cache = kmem_cache_create("kcf_context_cache",
1236 	    sizeof (struct kcf_context), 64, kcf_context_cache_constructor,
1237 	    kcf_context_cache_destructor, NULL, NULL, NULL, 0);
1238 
1239 	mutex_init(&kcf_dh_lock, NULL, MUTEX_DEFAULT, NULL);
1240 
1241 	gswq = kmem_alloc(sizeof (kcf_global_swq_t), KM_SLEEP);
1242 
1243 	mutex_init(&gswq->gs_lock, NULL, MUTEX_DEFAULT, NULL);
1244 	cv_init(&gswq->gs_cv, NULL, CV_DEFAULT, NULL);
1245 	gswq->gs_njobs = 0;
1246 	gswq->gs_maxjobs = kcf_maxthreads * crypto_taskq_maxalloc;
1247 	gswq->gs_first = gswq->gs_last = NULL;
1248 
1249 	/* Initialize the global reqid table */
1250 	for (i = 0; i < REQID_TABLES; i++) {
1251 		rt = kmem_zalloc(sizeof (kcf_reqid_table_t), KM_SLEEP);
1252 		kcf_reqid_table[i] = rt;
1253 		mutex_init(&rt->rt_lock, NULL, MUTEX_DEFAULT, NULL);
1254 		rt->rt_curid = i;
1255 	}
1256 
1257 	/* Allocate and initialize the thread pool */
1258 	kcfpool_alloc();
1259 
1260 	/* Initialize the event notification list variables */
1261 	mutex_init(&ntfy_list_lock, NULL, MUTEX_DEFAULT, NULL);
1262 	cv_init(&ntfy_list_cv, NULL, CV_DEFAULT, NULL);
1263 
1264 	/* Initialize the crypto_bufcall list variables */
1265 	mutex_init(&cbuf_list_lock, NULL, MUTEX_DEFAULT, NULL);
1266 	cv_init(&cbuf_list_cv, NULL, CV_DEFAULT, NULL);
1267 
1268 	/* Create the kcf kstat */
1269 	kcf_misc_kstat = kstat_create("kcf", 0, "framework_stats", "crypto",
1270 	    KSTAT_TYPE_NAMED, sizeof (kcf_stats_t) / sizeof (kstat_named_t),
1271 	    KSTAT_FLAG_VIRTUAL);
1272 
1273 	if (kcf_misc_kstat != NULL) {
1274 		kcf_misc_kstat->ks_data = &kcf_ksdata;
1275 		kcf_misc_kstat->ks_update = kcf_misc_kstat_update;
1276 		kstat_install(kcf_misc_kstat);
1277 	}
1278 }
1279 
1280 /*
1281  * This routine should only be called by drv/cryptoadm.
1282  *
1283  * The kcf_sched_running flag isn't protected by a lock, but we are safe
1284  * because the first thread ("cryptoadm refresh") calling this routine during
1285  * boot completes before any other thread that can call this routine.
1286  */
1287 void
1288 kcf_sched_start(void)
1289 {
1290 	if (kcf_sched_running)
1291 		return;
1292 
1293 	/* Start the failover kernel thread for now */
1294 	(void) thread_create(NULL, 0, &kcf_failover_thread, 0, 0, &p0,
1295 	    TS_RUN, minclsyspri);
1296 
1297 	/* Start the background processing thread. */
1298 	(void) thread_create(NULL, 0, &crypto_bufcall_service, 0, 0, &p0,
1299 	    TS_RUN, minclsyspri);
1300 
1301 	kcf_sched_running = B_TRUE;
1302 }
1303 
1304 /*
1305  * Signal the waiting sync client.
1306  */
1307 void
1308 kcf_sop_done(kcf_sreq_node_t *sreq, int error)
1309 {
1310 	mutex_enter(&sreq->sn_lock);
1311 	sreq->sn_state = REQ_DONE;
1312 	sreq->sn_rv = error;
1313 	cv_signal(&sreq->sn_cv);
1314 	mutex_exit(&sreq->sn_lock);
1315 }
1316 
1317 /*
1318  * Callback the async client with the operation status.
1319  * We free the async request node and possibly the context.
1320  * We also handle any chain of requests hanging off
1321  * the context.
1322  */
1323 void
1324 kcf_aop_done(kcf_areq_node_t *areq, int error)
1325 {
1326 	kcf_op_type_t optype;
1327 	boolean_t skip_notify = B_FALSE;
1328 	kcf_context_t *ictx;
1329 	kcf_areq_node_t *nextreq;
1330 
1331 	/*
1332 	 * Handle recoverable errors. This has to be done
1333 	 * before doing anything else in this routine so that
1334 	 * we do not change the state of the request.
1335 	 */
1336 	if (error != CRYPTO_SUCCESS && IS_RECOVERABLE(error)) {
1337 		/*
1338 		 * We try another provider, if one is available. Else
1339 		 * we continue with the failure notification to the
1340 		 * client.
1341 		 */
1342 		if (kcf_resubmit_request(areq) == CRYPTO_QUEUED)
1343 			return;
1344 	}
1345 
1346 	mutex_enter(&areq->an_lock);
1347 	areq->an_state = REQ_DONE;
1348 	mutex_exit(&areq->an_lock);
1349 
1350 	optype = (&areq->an_params)->rp_optype;
1351 	if ((ictx = areq->an_context) != NULL) {
1352 		/*
1353 		 * After a request is removed from the request
1354 		 * queue, it still stays on the chain of requests hanging
1355 		 * off its context structure. It needs to be removed
1356 		 * from this chain at this point.
1357 		 */
1358 		mutex_enter(&ictx->kc_in_use_lock);
1359 		nextreq = areq->an_ctxchain_next;
1360 		if (nextreq != NULL) {
1361 			mutex_enter(&nextreq->an_lock);
1362 			nextreq->an_is_my_turn = B_TRUE;
1363 			cv_signal(&nextreq->an_turn_cv);
1364 			mutex_exit(&nextreq->an_lock);
1365 		}
1366 
1367 		ictx->kc_req_chain_first = nextreq;
1368 		if (nextreq == NULL)
1369 			ictx->kc_req_chain_last = NULL;
1370 		mutex_exit(&ictx->kc_in_use_lock);
1371 
1372 		if (IS_SINGLE_OP(optype) || IS_FINAL_OP(optype)) {
1373 			ASSERT(nextreq == NULL);
1374 			KCF_CONTEXT_REFRELE(ictx);
1375 		} else if (error != CRYPTO_SUCCESS && IS_INIT_OP(optype)) {
1376 		/*
1377 		 * NOTE - We do not release the context in case of update
1378 		 * operations. We require the consumer to free it explicitly,
1379 		 * in case it wants to abandon an update operation. This is done
1380 		 * as there may be mechanisms in ECB mode that can continue
1381 		 * even if an operation on a block fails.
1382 		 */
1383 			KCF_CONTEXT_REFRELE(ictx);
1384 		}
1385 	}
1386 
1387 	/* Deal with the internal continuation to this request first */
1388 
1389 	if (areq->an_isdual) {
1390 		kcf_dual_req_t *next_arg;
1391 		next_arg = (kcf_dual_req_t *)areq->an_reqarg.cr_callback_arg;
1392 		next_arg->kr_areq = areq;
1393 		KCF_AREQ_REFHOLD(areq);
1394 		areq->an_isdual = B_FALSE;
1395 
1396 		NOTIFY_CLIENT(areq, error);
1397 		return;
1398 	}
1399 
1400 	/*
1401 	 * If the CRYPTO_NOTIFY_OPDONE flag is set, we should always
1402 	 * notify. If this flag is clear, we skip the notification
1403 	 * provided there are no errors. We check this flag only for
1404 	 * init or update operations. It is ignored for single, final or
1405 	 * atomic operations.
1406 	 */
1407 	skip_notify = (IS_UPDATE_OP(optype) || IS_INIT_OP(optype)) &&
1408 	    (!(areq->an_reqarg.cr_flag & CRYPTO_NOTIFY_OPDONE)) &&
1409 	    (error == CRYPTO_SUCCESS);
1410 
1411 	if (!skip_notify) {
1412 		NOTIFY_CLIENT(areq, error);
1413 	}
1414 
1415 	if (!(areq->an_reqarg.cr_flag & CRYPTO_SKIP_REQID))
1416 		kcf_reqid_delete(areq);
1417 
1418 	KCF_AREQ_REFRELE(areq);
1419 }
1420 
1421 /*
1422  * Allocate the thread pool and initialize all the fields.
1423  */
1424 static void
1425 kcfpool_alloc()
1426 {
1427 	kcfpool = kmem_alloc(sizeof (kcf_pool_t), KM_SLEEP);
1428 
1429 	kcfpool->kp_threads = kcfpool->kp_idlethreads = 0;
1430 	kcfpool->kp_blockedthreads = 0;
1431 	kcfpool->kp_signal_create_thread = B_FALSE;
1432 	kcfpool->kp_nthrs = 0;
1433 	kcfpool->kp_user_waiting = B_FALSE;
1434 
1435 	mutex_init(&kcfpool->kp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
1436 	cv_init(&kcfpool->kp_nothr_cv, NULL, CV_DEFAULT, NULL);
1437 
1438 	mutex_init(&kcfpool->kp_user_lock, NULL, MUTEX_DEFAULT, NULL);
1439 	cv_init(&kcfpool->kp_user_cv, NULL, CV_DEFAULT, NULL);
1440 
1441 	kcf_idlethr_timeout = KCF_DEFAULT_THRTIMEOUT;
1442 }
1443 
1444 /*
1445  * This function is run by the 'creator' thread in the pool.
1446  * It is called from ioctl(CRYPTO_POOL_WAIT, ...).
1447  */
1448 int
1449 kcf_svc_wait(int *nthrs)
1450 {
1451 	clock_t rv;
1452 	clock_t timeout_val;
1453 
1454 	if (kcfpool == NULL)
1455 		return (ENOENT);
1456 
1457 	mutex_enter(&kcfpool->kp_user_lock);
1458 	/* Check if there's already a user thread waiting on this kcfpool */
1459 	if (kcfpool->kp_user_waiting) {
1460 		mutex_exit(&kcfpool->kp_user_lock);
1461 		*nthrs = 0;
1462 		return (EBUSY);
1463 	}
1464 
1465 	kcfpool->kp_user_waiting = B_TRUE;
1466 
1467 	/* Go to sleep, waiting for the signaled flag. */
1468 	while (!kcfpool->kp_signal_create_thread) {
1469 		timeout_val = ddi_get_lbolt() +
1470 		    drv_usectohz(kcf_idlethr_timeout);
1471 
1472 		rv = cv_timedwait_sig(&kcfpool->kp_user_cv,
1473 		    &kcfpool->kp_user_lock, timeout_val);
1474 		switch (rv) {
1475 		case 0:
1476 			/* Interrupted, return to handle exit or signal */
1477 			kcfpool->kp_user_waiting = B_FALSE;
1478 			kcfpool->kp_signal_create_thread = B_FALSE;
1479 			mutex_exit(&kcfpool->kp_user_lock);
1480 			/*
1481 			 * kcfd is exiting. Release the door and
1482 			 * invalidate it.
1483 			 */
1484 			mutex_enter(&kcf_dh_lock);
1485 			if (kcf_dh != NULL) {
1486 				door_ki_rele(kcf_dh);
1487 				kcf_dh = NULL;
1488 			}
1489 			mutex_exit(&kcf_dh_lock);
1490 			return (EINTR);
1491 
1492 		case -1:
1493 			/* Timed out. Recalculate the min/max threads */
1494 			compute_min_max_threads();
1495 			break;
1496 
1497 		default:
1498 			/* Worker thread did a cv_signal() */
1499 			break;
1500 		}
1501 	}
1502 
1503 	kcfpool->kp_signal_create_thread = B_FALSE;
1504 	kcfpool->kp_user_waiting = B_FALSE;
1505 
1506 	*nthrs = kcfpool->kp_nthrs;
1507 	mutex_exit(&kcfpool->kp_user_lock);
1508 
1509 	/* Return to userland for possible thread creation. */
1510 	return (0);
1511 }
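
/*
 * Added note (the kcfd side is an assumption; it is not shown in this
 * file): the expected user-land pairing is roughly
 *
 *	while (ioctl(fd, CRYPTO_POOL_WAIT, ...) == 0) {
 *		for (i = 0; i < nthrs; i++)
 *			create an LWP that loops in
 *			    ioctl(fd, CRYPTO_POOL_RUN, ...);
 *	}
 *
 * i.e. kcf_svc_wait() tells the creator thread in kcfd how many pool
 * threads to spawn, and each spawned thread ends up in kcf_svc_do_run()
 * above via the CRYPTO_POOL_RUN ioctl.
 */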
1512 
1513 
1514 /*
1515  * This routine introduces a locking order for gswq->gs_lock followed
1516  * by cpu_lock.
1517  * This means that no consumer of the k-api should hold cpu_lock when calling
1518  * k-api routines.
1519  */
1520 static void
1521 compute_min_max_threads()
1522 {
1523 	mutex_enter(&gswq->gs_lock);
1524 	mutex_enter(&cpu_lock);
1525 	kcf_minthreads = curthread->t_cpupart->cp_ncpus;
1526 	mutex_exit(&cpu_lock);
1527 	kcf_maxthreads = kcf_thr_multiple * kcf_minthreads;
1528 	gswq->gs_maxjobs = kcf_maxthreads * crypto_taskq_maxalloc;
1529 	mutex_exit(&gswq->gs_lock);
1530 }
1531 
1532 /*
1533  * This is the main routine of the failover kernel thread.
1534  * If there are any threads in the pool we sleep. The last thread in the
1535  * pool to exit will signal us to get to work. We get back to sleep
1536  * once we detect that the pool has threads.
1537  *
1538  * Note that in the hand-off from us to a pool thread we get to run once.
1539  * Since this hand-off is a rare event this should be fine.
1540  */
1541 static void
1542 kcf_failover_thread()
1543 {
1544 	int error = 0;
1545 	kcf_context_t *ictx;
1546 	kcf_areq_node_t *req;
1547 	callb_cpr_t cpr_info;
1548 	kmutex_t cpr_lock;
1549 	static boolean_t is_logged = B_FALSE;
1550 
1551 	mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
1552 	CALLB_CPR_INIT(&cpr_info, &cpr_lock, callb_generic_cpr,
1553 	    "kcf_failover_thread");
1554 
1555 	for (;;) {
1556 		/*
1557 		 * Wait if there are any threads in the pool.
1558 		 */
1559 		if (kcfpool->kp_threads > 0) {
1560 			mutex_enter(&cpr_lock);
1561 			CALLB_CPR_SAFE_BEGIN(&cpr_info);
1562 			mutex_exit(&cpr_lock);
1563 
1564 			mutex_enter(&kcfpool->kp_thread_lock);
1565 			cv_wait(&kcfpool->kp_nothr_cv,
1566 			    &kcfpool->kp_thread_lock);
1567 			mutex_exit(&kcfpool->kp_thread_lock);
1568 
1569 			mutex_enter(&cpr_lock);
1570 			CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
1571 			mutex_exit(&cpr_lock);
1572 			is_logged = B_FALSE;
1573 		}
1574 
1575 		/*
1576 		 * Get the requests from the queue and wait if needed.
1577 		 */
1578 		mutex_enter(&gswq->gs_lock);
1579 
1580 		while ((req = kcf_dequeue()) == NULL) {
1581 			mutex_enter(&cpr_lock);
1582 			CALLB_CPR_SAFE_BEGIN(&cpr_info);
1583 			mutex_exit(&cpr_lock);
1584 
1585 			KCF_ATOMIC_INCR(kcfpool->kp_idlethreads);
1586 			cv_wait(&gswq->gs_cv, &gswq->gs_lock);
1587 			KCF_ATOMIC_DECR(kcfpool->kp_idlethreads);
1588 
1589 			mutex_enter(&cpr_lock);
1590 			CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
1591 			mutex_exit(&cpr_lock);
1592 		}
1593 
1594 		mutex_exit(&gswq->gs_lock);
1595 
1596 		/*
1597 		 * We check the kp_threads since kcfd could have started
1598 		 * while we were waiting on the global software queue.
1599 		 */
1600 		if (kcfpool->kp_threads <= 0 && !is_logged) {
1601 			cmn_err(CE_WARN, "kcfd is not running. Please check "
1602 			    "and restart kcfd. Using the failover kernel "
1603 			    "thread for now.\n");
1604 			is_logged = B_TRUE;
1605 		}
1606 
1607 		/*
1608 		 * Get to work on the request.
1609 		 */
1610 		ictx = req->an_context;
1611 		mutex_enter(&req->an_lock);
1612 		req->an_state = REQ_INPROGRESS;
1613 		mutex_exit(&req->an_lock);
1614 
1615 		error = common_submit_request(req->an_provider, ictx ?
1616 		    &ictx->kc_glbl_ctx : NULL, &req->an_params, req);
1617 
1618 		kcf_aop_done(req, error);
1619 	}
1620 }
1621 
1622 /*
1623  * Insert the async request in the hash table after assigning it
1624  * an ID. Returns the ID.
1625  *
1626  * The caller can later pass the ID as an argument to
1627  * crypto_cancel_req().
1628  */
1629 static crypto_req_id_t
1630 kcf_reqid_insert(kcf_areq_node_t *areq)
1631 {
1632 	int indx;
1633 	crypto_req_id_t id;
1634 	kcf_areq_node_t *headp;
1635 	kcf_reqid_table_t *rt =
1636 	    kcf_reqid_table[CPU->cpu_seqid & REQID_TABLE_MASK];
1637 
1638 	mutex_enter(&rt->rt_lock);
1639 
1640 	rt->rt_curid = id =
1641 	    (rt->rt_curid - REQID_COUNTER_LOW) | REQID_COUNTER_HIGH;
1642 	SET_REQID(areq, id);
1643 	indx = REQID_HASH(id);
1644 	headp = areq->an_idnext = rt->rt_idhash[indx];
1645 	areq->an_idprev = NULL;
1646 	if (headp != NULL)
1647 		headp->an_idprev = areq;
1648 
1649 	rt->rt_idhash[indx] = areq;
1650 	mutex_exit(&rt->rt_lock);
1651 
1652 	return (id);
1653 }
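
/*
 * Added note: the reqid table is picked by the inserting CPU
 * (CPU->cpu_seqid & REQID_TABLE_MASK), and rt_curid is seeded with the
 * table index, so the low bits of every ID issued from a table keep
 * identifying that table. kcf_reqid_delete() and crypto_cancel_req()
 * below rely on this: they recover the table with id & REQID_TABLE_MASK
 * and the bucket with REQID_HASH(id), so the crypto_req_id_t alone is
 * enough to find the request again.
 */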
1654 
1655 /*
1656  * Delete the async request from the hash table.
1657  */
1658 static void
1659 kcf_reqid_delete(kcf_areq_node_t *areq)
1660 {
1661 	int indx;
1662 	kcf_areq_node_t *nextp, *prevp;
1663 	crypto_req_id_t id = GET_REQID(areq);
1664 	kcf_reqid_table_t *rt;
1665 
1666 	rt = kcf_reqid_table[id & REQID_TABLE_MASK];
1667 	indx = REQID_HASH(id);
1668 
1669 	mutex_enter(&rt->rt_lock);
1670 
1671 	nextp = areq->an_idnext;
1672 	prevp = areq->an_idprev;
1673 	if (nextp != NULL)
1674 		nextp->an_idprev = prevp;
1675 	if (prevp != NULL)
1676 		prevp->an_idnext = nextp;
1677 	else
1678 		rt->rt_idhash[indx] = nextp;
1679 
1680 	SET_REQID(areq, 0);
1681 	cv_broadcast(&areq->an_done);
1682 
1683 	mutex_exit(&rt->rt_lock);
1684 }
1685 
1686 /*
1687  * Cancel a single asynchronous request.
1688  *
1689  * We guarantee that no problems will result from calling
1690  * crypto_cancel_req() for a request which is either running, or
1691  * has already completed. We remove the request from any queues
1692  * if it is possible. We wait for request completion if the
1693  * request is dispatched to a provider.
1694  *
1695  * Calling context:
1696  * 	Can be called from user context only.
1697  *
1698  * NOTE: We acquire the following locks in this routine (in order):
1699  *	- rt_lock (kcf_reqid_table_t)
1700  *	- gswq->gs_lock
1701  *	- areq->an_lock
1702  *	- ictx->kc_in_use_lock (from kcf_removereq_in_ctxchain())
1703  *
1704  * This locking order MUST be maintained everywhere else in the code.
1705  */
1706 void
1707 crypto_cancel_req(crypto_req_id_t id)
1708 {
1709 	int indx;
1710 	kcf_areq_node_t *areq;
1711 	kcf_provider_desc_t *pd;
1712 	kcf_context_t *ictx;
1713 	kcf_reqid_table_t *rt;
1714 
1715 	rt = kcf_reqid_table[id & REQID_TABLE_MASK];
1716 	indx = REQID_HASH(id);
1717 
1718 	mutex_enter(&rt->rt_lock);
1719 	for (areq = rt->rt_idhash[indx]; areq; areq = areq->an_idnext) {
1720 	if (GET_REQID(areq) == id) {
1721 		/*
1722 		 * We found the request. It is either still waiting
1723 		 * in the framework queues or running at the provider.
1724 		 */
1725 		pd = areq->an_provider;
1726 		ASSERT(pd != NULL);
1727 
1728 		switch (pd->pd_prov_type) {
1729 		case CRYPTO_SW_PROVIDER:
1730 			mutex_enter(&gswq->gs_lock);
1731 			mutex_enter(&areq->an_lock);
1732 
1733 			/* This request can be safely canceled. */
1734 			if (areq->an_state <= REQ_WAITING) {
1735 				/* Remove from gswq, global software queue. */
1736 				kcf_remove_node(areq);
1737 				if ((ictx = areq->an_context) != NULL)
1738 					kcf_removereq_in_ctxchain(ictx, areq);
1739 
1740 				mutex_exit(&areq->an_lock);
1741 				mutex_exit(&gswq->gs_lock);
1742 				mutex_exit(&rt->rt_lock);
1743 
1744 				/* Remove areq from hash table and free it. */
1745 				kcf_reqid_delete(areq);
1746 				KCF_AREQ_REFRELE(areq);
1747 				return;
1748 			}
1749 
1750 			mutex_exit(&areq->an_lock);
1751 			mutex_exit(&gswq->gs_lock);
1752 			break;
1753 
1754 		case CRYPTO_HW_PROVIDER:
1755 			/*
1756 			 * There is no interface to remove an entry
1757 			 * once it is on the taskq. So, we do not do
1758 			 * anything for a hardware provider.
1759 			 */
1760 			break;
1761 		}
1762 
1763 		/*
1764 		 * The request is running. Wait for the request completion
1765 		 * to notify us.
1766 		 */
1767 		KCF_AREQ_REFHOLD(areq);
1768 		while (GET_REQID(areq) == id)
1769 			cv_wait(&areq->an_done, &rt->rt_lock);
1770 		KCF_AREQ_REFRELE(areq);
1771 		break;
1772 	}
1773 	}
1774 
1775 	mutex_exit(&rt->rt_lock);
1776 }
1777 
1778 /*
1779  * Cancel all asynchronous requests associated with the
1780  * passed in crypto context and free it.
1781  *
1782  * A client SHOULD NOT call this routine after calling a crypto_*_final
1783  * routine. This routine is called only during intermediate operations.
1784  * The client should not use the crypto context after this function returns
1785  * since we destroy it.
1786  *
1787  * Calling context:
1788  * 	Can be called from user context only.
1789  */
1790 void
1791 crypto_cancel_ctx(crypto_context_t ctx)
1792 {
1793 	kcf_context_t *ictx;
1794 	kcf_areq_node_t *areq;
1795 
1796 	if (ctx == NULL)
1797 		return;
1798 
1799 	ictx = (kcf_context_t *)((crypto_ctx_t *)ctx)->cc_framework_private;
1800 
1801 	mutex_enter(&ictx->kc_in_use_lock);
1802 
1803 	/* Walk the chain and cancel each request */
1804 	while ((areq = ictx->kc_req_chain_first) != NULL) {
1805 		/*
1806 		 * We have to drop the lock here as we may have
1807 		 * to wait for request completion. We hold the
1808 		 * request before dropping the lock though, so that it
1809 		 * won't be freed underneath us.
1810 		 */
1811 		KCF_AREQ_REFHOLD(areq);
1812 		mutex_exit(&ictx->kc_in_use_lock);
1813 
1814 		crypto_cancel_req(GET_REQID(areq));
1815 		KCF_AREQ_REFRELE(areq);
1816 
1817 		mutex_enter(&ictx->kc_in_use_lock);
1818 	}
1819 
1820 	mutex_exit(&ictx->kc_in_use_lock);
1821 	KCF_CONTEXT_REFRELE(ictx);
1822 }
1823 
1824 /*
1825  * Update kstats.
1826  */
1827 static int
1828 kcf_misc_kstat_update(kstat_t *ksp, int rw)
1829 {
1830 	uint_t tcnt;
1831 	kcf_stats_t *ks_data;
1832 
1833 	if (rw == KSTAT_WRITE)
1834 		return (EACCES);
1835 
1836 	ks_data = ksp->ks_data;
1837 
1838 	ks_data->ks_thrs_in_pool.value.ui32 = kcfpool->kp_threads;
1839 	/*
1840 	 * The failover thread is counted in kp_idlethreads in
1841 	 * some corner cases. This is done to avoid doing more checks
1842 	 * when submitting a request. We account for those cases below.
1843 	 */
1844 	if ((tcnt = kcfpool->kp_idlethreads) == (kcfpool->kp_threads + 1))
1845 		tcnt--;
1846 	ks_data->ks_idle_thrs.value.ui32 = tcnt;
1847 	ks_data->ks_minthrs.value.ui32 = kcf_minthreads;
1848 	ks_data->ks_maxthrs.value.ui32 = kcf_maxthreads;
1849 	ks_data->ks_swq_njobs.value.ui32 = gswq->gs_njobs;
1850 	ks_data->ks_swq_maxjobs.value.ui32 = gswq->gs_maxjobs;
1851 	ks_data->ks_taskq_threads.value.ui32 = crypto_taskq_threads;
1852 	ks_data->ks_taskq_minalloc.value.ui32 = crypto_taskq_minalloc;
1853 	ks_data->ks_taskq_maxalloc.value.ui32 = crypto_taskq_maxalloc;
1854 
1855 	return (0);
1856 }
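
/*
 * Illustrative sketch (not compiled): the usual kstat(9S) pattern for
 * hooking up a KSTAT_TYPE_NAMED update routine such as the one above.
 * The module, instance and statistic names used here are placeholders,
 * not necessarily the ones the framework registers.
 */
#if 0
static void
example_kstat_setup(void)
{
	kstat_t *ksp;

	ksp = kstat_create("example", 0, "example_stats", "crypto",
	    KSTAT_TYPE_NAMED, sizeof (kcf_stats_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (ksp != NULL) {
		/* Point at the static data; ks_update refreshes it on read. */
		ksp->ks_data = &kcf_ksdata;
		ksp->ks_update = kcf_misc_kstat_update;
		kstat_install(ksp);
	}
}
#endif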
1857 
1858 /*
1859  * Allocate and initialize a kcf_dual_req, used for saving the arguments of
1860  * a dual operation or an atomic operation that has to be internally
1861  * simulated with multiple single steps.
1862  * crq determines the memory allocation flags.
1863  */
1864 
1865 kcf_dual_req_t *
1866 kcf_alloc_req(crypto_call_req_t *crq)
1867 {
1868 	kcf_dual_req_t *kcr;
1869 
1870 	kcr = kmem_alloc(sizeof (kcf_dual_req_t), KCF_KMFLAG(crq));
1871 
1872 	if (kcr == NULL)
1873 		return (NULL);
1874 
1875 	/* Copy the whole crypto_call_req struct, as it isn't persistent */
1876 	if (crq != NULL)
1877 		kcr->kr_callreq = *crq;
1878 	else
1879 		bzero(&(kcr->kr_callreq), sizeof (crypto_call_req_t));
1880 	kcr->kr_areq = NULL;
1881 	kcr->kr_saveoffset = 0;
1882 	kcr->kr_savelen = 0;
1883 
1884 	return (kcr);
1885 }
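
/*
 * Illustrative sketch (not compiled): a fragment of a hypothetical submit
 * routine allocating a kcf_dual_req_t. Per the comment above, crq
 * determines the allocation flags; a NULL crq (synchronous caller)
 * typically permits a sleeping allocation. The step_req handling mirrors
 * how the scheduler callbacks are chained, but example_submit_dual()
 * itself is not part of the framework.
 */
#if 0
static int
example_submit_dual(crypto_call_req_t *crq)
{
	kcf_dual_req_t *next_req;
	crypto_call_req_t step_req;

	/*
	 * Save the client's original callback; kcf_next_req() and
	 * kcf_last_req() restore it when the emulation completes.
	 */
	next_req = kcf_alloc_req(crq);
	if (next_req == NULL)
		return (CRYPTO_HOST_MEMORY);

	/* The first step reports back to the scheduler, not the client. */
	bzero(&step_req, sizeof (step_req));
	if (crq != NULL)
		step_req.cr_flag = crq->cr_flag;
	step_req.cr_callback_func = kcf_next_req;
	step_req.cr_callback_arg = next_req;

	/* ... submit the first single-step request using step_req ... */
	return (CRYPTO_QUEUED);
}
#endif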
1886 
1887 /*
1888  * Callback routine for the next part of a simulated dual operation.
1889  * Schedules the next step.
1890  *
1891  * This routine can be called from interrupt context.
1892  */
1893 void
1894 kcf_next_req(void *next_req_arg, int status)
1895 {
1896 	kcf_dual_req_t *next_req = (kcf_dual_req_t *)next_req_arg;
1897 	kcf_req_params_t *params = &(next_req->kr_params);
1898 	kcf_areq_node_t *areq = next_req->kr_areq;
1899 	int error = status;
1900 	kcf_provider_desc_t *pd;
1901 	crypto_dual_data_t *ct;
1902 
1903 	/* Stop the processing if an error occurred at this step */
1904 	if (error != CRYPTO_SUCCESS) {
1905 out:
1906 		areq->an_reqarg = next_req->kr_callreq;
1907 		KCF_AREQ_REFRELE(areq);
1908 		kmem_free(next_req, sizeof (kcf_dual_req_t));
1909 		areq->an_isdual = B_FALSE;
1910 		kcf_aop_done(areq, error);
1911 		return;
1912 	}
1913 
1914 	switch (params->rp_opgrp) {
1915 	case KCF_OG_MAC: {
1916 
1917 		/*
1918 		 * The next req is submitted with the same reqid as the
1919 		 * first part. The consumer only got back that reqid, and
1920 		 * should still be able to cancel the operation during its
1921 		 * second step.
1922 		 */
1923 		kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
1924 		crypto_ctx_template_t mac_tmpl;
1925 		kcf_mech_entry_t *me;
1926 
1927 		ct = (crypto_dual_data_t *)mops->mo_data;
1928 		mac_tmpl = (crypto_ctx_template_t)mops->mo_templ;
1929 
1930 		/* No expected recoverable failures, so no retry list */
1931 		pd = kcf_get_mech_provider(mops->mo_framework_mechtype,
1932 		    &me, &error, NULL, CRYPTO_FG_MAC_ATOMIC,
1933 		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len2);
1934 
1935 		if (pd == NULL) {
1936 			error = CRYPTO_MECH_NOT_SUPPORTED;
1937 			goto out;
1938 		}
1939 		/* Validate the MAC context template here */
1940 		if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
1941 		    (mac_tmpl != NULL)) {
1942 			kcf_ctx_template_t *ctx_mac_tmpl;
1943 
1944 			ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;
1945 
1946 			if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
1947 				KCF_PROV_REFRELE(pd);
1948 				error = CRYPTO_OLD_CTX_TEMPLATE;
1949 				goto out;
1950 			}
1951 			mops->mo_templ = ctx_mac_tmpl->ct_prov_tmpl;
1952 		}
1953 
1954 		break;
1955 	}
1956 	case KCF_OG_DECRYPT: {
1957 		kcf_decrypt_ops_params_t *dcrops =
1958 		    &(params->rp_u.decrypt_params);
1959 
1960 		ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
1961 		/* No expected recoverable failures, so no retry list */
1962 		pd = kcf_get_mech_provider(dcrops->dop_framework_mechtype,
1963 		    NULL, &error, NULL, CRYPTO_FG_DECRYPT_ATOMIC,
1964 		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len1);
1965 
1966 		if (pd == NULL) {
1967 			error = CRYPTO_MECH_NOT_SUPPORTED;
1968 			goto out;
1969 		}
1970 		break;
1971 	}
1972 	}
1973 
1974 	/* The second step uses len2 and offset2 of the dual_data */
1975 	next_req->kr_saveoffset = ct->dd_offset1;
1976 	next_req->kr_savelen = ct->dd_len1;
1977 	ct->dd_offset1 = ct->dd_offset2;
1978 	ct->dd_len1 = ct->dd_len2;
1979 
1980 	/* Preserve only whether the caller is restricted */
1981 	if (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED) {
1982 		areq->an_reqarg.cr_flag = CRYPTO_RESTRICTED;
1983 	} else {
1984 		areq->an_reqarg.cr_flag = 0;
1985 	}
1986 
1987 	areq->an_reqarg.cr_callback_func = kcf_last_req;
1988 	areq->an_reqarg.cr_callback_arg = next_req;
1989 	areq->an_isdual = B_TRUE;
1990 
1991 	/*
1992 	 * We would like to call kcf_submit_request() here. But that
1993 	 * is not possible as that routine allocates a new
1994 	 * kcf_areq_node_t request structure, while we need to
1995 	 * reuse the existing request structure.
1996 	 */
1997 	switch (pd->pd_prov_type) {
1998 	case CRYPTO_SW_PROVIDER:
1999 		error = common_submit_request(pd, NULL, params,
2000 		    KCF_RHNDL(KM_NOSLEEP));
2001 		break;
2002 
2003 	case CRYPTO_HW_PROVIDER: {
2004 		kcf_provider_desc_t *old_pd;
2005 		taskq_t *taskq = pd->pd_sched_info.ks_taskq;
2006 
2007 		/*
2008 		 * Set the params for the second step in the
2009 		 * dual-ops.
2010 		 */
2011 		areq->an_params = *params;
2012 		old_pd = areq->an_provider;
2013 		KCF_PROV_REFRELE(old_pd);
2014 		KCF_PROV_REFHOLD(pd);
2015 		areq->an_provider = pd;
2016 
2017 		/*
2018 		 * Note that we have to do a taskq_dispatch()
2019 		 * here as we may be in interrupt context.
2020 		 */
2021 		if (taskq_dispatch(taskq, process_req_hwp, areq,
2022 		    TQ_NOSLEEP) == (taskqid_t)0) {
2023 			error = CRYPTO_HOST_MEMORY;
2024 		} else {
2025 			error = CRYPTO_QUEUED;
2026 		}
2027 		break;
2028 	}
2029 	}
2030 
2031 	/*
2032 	 * We have to release the holds on the request and the provider
2033 	 * in all cases.
2034 	 */
2035 	KCF_AREQ_REFRELE(areq);
2036 	KCF_PROV_REFRELE(pd);
2037 
2038 	if (error != CRYPTO_QUEUED) {
2039 		/* restore, clean up, and invoke the client's callback */
2040 
2041 		ct->dd_offset1 = next_req->kr_saveoffset;
2042 		ct->dd_len1 = next_req->kr_savelen;
2043 		areq->an_reqarg = next_req->kr_callreq;
2044 		kmem_free(next_req, sizeof (kcf_dual_req_t));
2045 		areq->an_isdual = B_FALSE;
2046 		kcf_aop_done(areq, error);
2047 	}
2048 }
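
/*
 * Illustrative sketch (not compiled): the dual_data bookkeeping performed
 * by kcf_next_req() above and undone by kcf_last_req() below. The primary
 * view (dd_offset1/dd_len1) is temporarily redirected at the region
 * described by dd_offset2/dd_len2 for the second step, then restored on
 * completion. The sizes used here are arbitrary.
 */
#if 0
	crypto_dual_data_t dd;
	off_t saved_off;
	size_t saved_len;

	dd.dd_offset1 = 0;		/* region used by the first step */
	dd.dd_len1 = 512;
	dd.dd_offset2 = 512;		/* region used by the second step */
	dd.dd_len2 = 20;

	/* Second step: point the primary view at the second region. */
	saved_off = dd.dd_offset1;
	saved_len = dd.dd_len1;
	dd.dd_offset1 = dd.dd_offset2;
	dd.dd_len1 = dd.dd_len2;

	/* ... the second single-step operation runs here ... */

	/* Completion (kcf_last_req): restore the primary view. */
	dd.dd_offset1 = saved_off;
	dd.dd_len1 = saved_len;
#endif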
2049 
2050 /*
2051  * Last part of an emulated dual operation.
2052  * Restore the saved dual_data fields, clean up, and notify the client.
2053  */
2054 void
2055 kcf_last_req(void *last_req_arg, int status)
2056 {
2057 	kcf_dual_req_t *last_req = (kcf_dual_req_t *)last_req_arg;
2058 
2059 	kcf_req_params_t *params = &(last_req->kr_params);
2060 	kcf_areq_node_t *areq = last_req->kr_areq;
2061 	crypto_dual_data_t *ct;
2062 
2063 	switch (params->rp_opgrp) {
2064 	case KCF_OG_MAC: {
2065 		kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
2066 
2067 		ct = (crypto_dual_data_t *)mops->mo_data;
2068 		break;
2069 	}
2070 	case KCF_OG_DECRYPT: {
2071 		kcf_decrypt_ops_params_t *dcrops =
2072 		    &(params->rp_u.decrypt_params);
2073 
2074 		ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
2075 		break;
2076 	}
2077 	}
2078 	ct->dd_offset1 = last_req->kr_saveoffset;
2079 	ct->dd_len1 = last_req->kr_savelen;
2080 
2081 	/* The submitter used kcf_last_req as its callback */
2082 
2083 	if (areq == NULL) {
2084 		crypto_call_req_t *cr = &last_req->kr_callreq;
2085 
2086 		(*(cr->cr_callback_func))(cr->cr_callback_arg, status);
2087 		kmem_free(last_req, sizeof (kcf_dual_req_t));
2088 		return;
2089 	}
2090 	areq->an_reqarg = last_req->kr_callreq;
2091 	KCF_AREQ_REFRELE(areq);
2092 	kmem_free(last_req, sizeof (kcf_dual_req_t));
2093 	areq->an_isdual = B_FALSE;
2094 	kcf_aop_done(areq, status);
2095 }
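
/*
 * Illustrative sketch (not compiled): the kind of client callback that
 * eventually receives the status delivered by kcf_last_req() or
 * kcf_aop_done(). Any function matching crypto_call_func_t will do; the
 * example_job_t type and its members are hypothetical.
 */
#if 0
static void
example_op_done(void *arg, int status)
{
	example_job_t *job = arg;

	mutex_enter(&job->ej_lock);
	job->ej_status = status;	/* CRYPTO_SUCCESS or the first error */
	job->ej_done = B_TRUE;
	cv_signal(&job->ej_cv);
	mutex_exit(&job->ej_lock);
}
#endif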
2096