/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
 */

/*
 * This file contains the core framework routines for the
 * kernel cryptographic framework. These routines sit at the
 * layer between the kernel API/ioctls and the SPI.
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/cpupart.h>
#include <sys/ksynch.h>
#include <sys/callb.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/kstat.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/taskq_impl.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>


kcf_global_swq_t *gswq;	/* Global software queue */

/* Thread pool related variables */
static kcf_pool_t *kcfpool;	/* Thread pool of kcfd LWPs */
int kcf_maxthreads = 2;
int kcf_minthreads = 1;
int kcf_thr_multiple = 2;	/* Boot-time tunable for experimentation */
static ulong_t	kcf_idlethr_timeout;
static boolean_t kcf_sched_running = B_FALSE;
#define	KCF_DEFAULT_THRTIMEOUT	60000000	/* 60 seconds */

/* kmem caches used by the scheduler */
static struct kmem_cache *kcf_sreq_cache;
static struct kmem_cache *kcf_areq_cache;
static struct kmem_cache *kcf_context_cache;

/* Global request ID table */
static kcf_reqid_table_t *kcf_reqid_table[REQID_TABLES];

/* KCF stats. Not protected. */
static kcf_stats_t kcf_ksdata = {
	{ "total threads in pool",	KSTAT_DATA_UINT32},
	{ "idle threads in pool",	KSTAT_DATA_UINT32},
	{ "min threads in pool",	KSTAT_DATA_UINT32},
	{ "max threads in pool",	KSTAT_DATA_UINT32},
	{ "requests in gswq",		KSTAT_DATA_UINT32},
	{ "max requests in gswq",	KSTAT_DATA_UINT32},
	{ "threads for HW taskq",	KSTAT_DATA_UINT32},
	{ "minalloc for HW taskq",	KSTAT_DATA_UINT32},
	{ "maxalloc for HW taskq",	KSTAT_DATA_UINT32}
};

static kstat_t *kcf_misc_kstat = NULL;
ulong_t kcf_swprov_hndl = 0;

static kcf_areq_node_t *kcf_areqnode_alloc(kcf_provider_desc_t *,
    kcf_context_t *, crypto_call_req_t *, kcf_req_params_t *, boolean_t);
static int kcf_disp_sw_request(kcf_areq_node_t *);
static void process_req_hwp(void *);
static kcf_areq_node_t	*kcf_dequeue(void);
static int kcf_enqueue(kcf_areq_node_t *);
static void kcfpool_alloc(void);
static void kcf_reqid_delete(kcf_areq_node_t *areq);
static crypto_req_id_t kcf_reqid_insert(kcf_areq_node_t *areq);
static int kcf_misc_kstat_update(kstat_t *ksp, int rw);
static void compute_min_max_threads(void);
static void kcfpool_svc(void *);
static void kcfpoold(void *);


/*
 * Create a new context.
 */
crypto_ctx_t *
kcf_new_ctx(crypto_call_req_t *crq, kcf_provider_desc_t *pd,
    crypto_session_id_t sid)
{
	crypto_ctx_t *ctx;
	kcf_context_t *kcf_ctx;
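	/*
	 * A NULL crq means the caller is synchronous and can block, so
	 * sleeping for memory is fine; asynchronous callers must not
	 * block and get KM_NOSLEEP, handling a NULL return themselves.
	 */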
	kcf_ctx = kmem_cache_alloc(kcf_context_cache,
	    (crq == NULL) ? KM_SLEEP : KM_NOSLEEP);
	if (kcf_ctx == NULL)
		return (NULL);

	/* initialize the context for the consumer */
	kcf_ctx->kc_refcnt = 1;
	kcf_ctx->kc_req_chain_first = NULL;
	kcf_ctx->kc_req_chain_last = NULL;
	kcf_ctx->kc_secondctx = NULL;
	KCF_PROV_REFHOLD(pd);
	kcf_ctx->kc_prov_desc = pd;
	kcf_ctx->kc_sw_prov_desc = NULL;
	kcf_ctx->kc_mech = NULL;

	ctx = &kcf_ctx->kc_glbl_ctx;
	ctx->cc_provider = pd->pd_prov_handle;
	ctx->cc_session = sid;
	ctx->cc_provider_private = NULL;
	ctx->cc_framework_private = (void *)kcf_ctx;
	ctx->cc_flags = 0;
	ctx->cc_opstate = NULL;

	return (ctx);
}

/*
 * Allocate a new async request node.
 *
 * ictx - Framework private context pointer
 * crq - Has callback function and argument. Must be non-NULL.
 * req - The parameters to pass to the SPI
 */
static kcf_areq_node_t *
kcf_areqnode_alloc(kcf_provider_desc_t *pd, kcf_context_t *ictx,
    crypto_call_req_t *crq, kcf_req_params_t *req, boolean_t isdual)
{
	kcf_areq_node_t	*arptr, *areq;

	ASSERT(crq != NULL);
	arptr = kmem_cache_alloc(kcf_areq_cache, KM_NOSLEEP);
	if (arptr == NULL)
		return (NULL);

	arptr->an_state = REQ_ALLOCATED;
	arptr->an_reqarg = *crq;
	arptr->an_params = *req;
	arptr->an_context = ictx;
	arptr->an_isdual = isdual;

	arptr->an_next = arptr->an_prev = NULL;
	KCF_PROV_REFHOLD(pd);
	arptr->an_provider = pd;
	arptr->an_tried_plist = NULL;
	arptr->an_refcnt = 1;
	arptr->an_idnext = arptr->an_idprev = NULL;

	/*
	 * Requests for context-less operations do not use the
	 * an_is_my_turn and an_ctxchain_next fields.
	 */
	if (ictx == NULL)
		return (arptr);

	KCF_CONTEXT_REFHOLD(ictx);
	/*
	 * Chain this request to the context.
	 */
	mutex_enter(&ictx->kc_in_use_lock);
	arptr->an_ctxchain_next = NULL;
	if ((areq = ictx->kc_req_chain_last) == NULL) {
		arptr->an_is_my_turn = B_TRUE;
		ictx->kc_req_chain_last =
		    ictx->kc_req_chain_first = arptr;
	} else {
		ASSERT(ictx->kc_req_chain_first != NULL);
		arptr->an_is_my_turn = B_FALSE;
		/* Insert the new request to the end of the chain. */
		areq->an_ctxchain_next = arptr;
		ictx->kc_req_chain_last = arptr;
	}
	mutex_exit(&ictx->kc_in_use_lock);

	return (arptr);
}

/*
 * Queue the request node and do one of the following:
 *	- If there is an idle thread, signal it to run.
 *	- Else, signal the creator thread to possibly create more threads.
 */
static int
kcf_disp_sw_request(kcf_areq_node_t *areq)
{
	int err;

	if ((err = kcf_enqueue(areq)) != 0)
		return (err);

	if (kcfpool->kp_idlethreads > 0) {
		/* Signal an idle thread to run */
		mutex_enter(&gswq->gs_lock);
		cv_signal(&gswq->gs_cv);
		mutex_exit(&gswq->gs_lock);

		return (CRYPTO_QUEUED);
	}

	/* Signal the creator thread for more threads */
	mutex_enter(&kcfpool->kp_lock);
	cv_signal(&kcfpool->kp_cv);
	mutex_exit(&kcfpool->kp_lock);

	return (CRYPTO_QUEUED);
}

/*
 * This routine is called by the taskq associated with
 * each hardware provider. We notify the kernel consumer
 * via the callback routine in case of CRYPTO_SUCCESS or
 * a failure.
 *
 * A request can be of type kcf_areq_node_t or of type
 * kcf_sreq_node_t.
 */
static void
process_req_hwp(void *ireq)
{
	int error = 0;
	crypto_ctx_t *ctx;
	kcf_call_type_t ctype;
	kcf_provider_desc_t *pd;
	kcf_areq_node_t *areq = (kcf_areq_node_t *)ireq;
	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)ireq;
	kcf_prov_cpu_t *mp;

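	/*
	 * Both request structures start with their call-type field, so
	 * GET_REQ_TYPE() works before we know which type we were handed.
	 */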
	pd = ((ctype = GET_REQ_TYPE(ireq)) == CRYPTO_SYNCH) ?
	    sreq->sn_provider : areq->an_provider;

	/*
	 * Wait if flow control is in effect for the provider. A
	 * CRYPTO_PROVIDER_READY or CRYPTO_PROVIDER_FAILED
	 * notification will signal us. We also get signaled if
	 * the provider is unregistering.
	 */
	if (pd->pd_state == KCF_PROV_BUSY) {
		mutex_enter(&pd->pd_lock);
		while (pd->pd_state == KCF_PROV_BUSY)
			cv_wait(&pd->pd_resume_cv, &pd->pd_lock);
		mutex_exit(&pd->pd_lock);
	}

	/*
	 * Bump the internal reference count while the request is being
	 * processed. This is how we know when it's safe to unregister
	 * a provider. This step must precede the pd_state check below.
	 */
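	/*
	 * The hold goes in this CPU's bin so that concurrent jobs do not
	 * all contend on a single reference count.
	 */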
	mp = &(pd->pd_percpu_bins[CPU_SEQID]);
	KCF_PROV_JOB_HOLD(mp);

	/*
	 * Fail the request if the provider has failed. We return a
	 * recoverable error and the notified clients attempt any
	 * recovery. For async clients this is done in kcf_aop_done()
	 * and for sync clients it is done in the k-api routines.
	 */
	if (pd->pd_state >= KCF_PROV_FAILED) {
		error = CRYPTO_DEVICE_ERROR;
		goto bail;
	}

	if (ctype == CRYPTO_SYNCH) {
		mutex_enter(&sreq->sn_lock);
		sreq->sn_state = REQ_INPROGRESS;
		sreq->sn_mp = mp;
		mutex_exit(&sreq->sn_lock);

		ctx = sreq->sn_context ? &sreq->sn_context->kc_glbl_ctx : NULL;
		error = common_submit_request(sreq->sn_provider, ctx,
		    sreq->sn_params, sreq);
	} else {
		kcf_context_t *ictx;
		ASSERT(ctype == CRYPTO_ASYNCH);

		/*
		 * We are in the per-hardware provider thread context and
		 * hence can sleep. Note that the caller would have done
		 * a taskq_dispatch(..., TQ_NOSLEEP) and would have returned.
		 */
		ctx = (ictx = areq->an_context) ? &ictx->kc_glbl_ctx : NULL;

		mutex_enter(&areq->an_lock);
		/*
		 * We need to maintain ordering for multi-part requests.
		 * an_is_my_turn is set to B_TRUE initially for a request
		 * when it is enqueued and there are no other requests
		 * for that context. It is set later from kcf_aop_done() when
		 * the request before us in the chain of requests for the
		 * context completes. We get signaled at that point.
		 */
		if (ictx != NULL) {
			ASSERT(ictx->kc_prov_desc == areq->an_provider);

			while (areq->an_is_my_turn == B_FALSE) {
				cv_wait(&areq->an_turn_cv, &areq->an_lock);
			}
		}
		areq->an_state = REQ_INPROGRESS;
		areq->an_mp = mp;
		mutex_exit(&areq->an_lock);

		error = common_submit_request(areq->an_provider, ctx,
		    &areq->an_params, areq);
	}

bail:
	if (error == CRYPTO_QUEUED) {
		/*
		 * The request is queued by the provider and we should
		 * get a crypto_op_notification() from the provider later.
		 * We notify the consumer at that time.
		 */
		return;
	} else {		/* CRYPTO_SUCCESS or other failure */
		KCF_PROV_JOB_RELE(mp);
		if (ctype == CRYPTO_SYNCH)
			kcf_sop_done(sreq, error);
		else
			kcf_aop_done(areq, error);
	}
}

/*
 * This routine checks if a request can be retried on another
 * provider. If true, mech1 is initialized to point to the mechanism
 * structure. mech2 is also initialized in case of a dual operation. fg
 * is initialized to the correct crypto_func_group_t bit flag. They are
 * initialized by this routine, so that the caller can pass them to a
 * kcf_get_mech_provider() or kcf_get_dual_provider() with no further change.
 *
 * We check that the request is for an init or atomic routine and that
 * it is for one of the operation groups used from the k-api.
 */
static boolean_t
can_resubmit(kcf_areq_node_t *areq, crypto_mechanism_t **mech1,
    crypto_mechanism_t **mech2, crypto_func_group_t *fg)
{
	kcf_req_params_t *params;
	kcf_op_type_t optype;

	params = &areq->an_params;
	optype = params->rp_optype;

	if (!(IS_INIT_OP(optype) || IS_ATOMIC_OP(optype)))
		return (B_FALSE);

	switch (params->rp_opgrp) {
	case KCF_OG_DIGEST: {
		kcf_digest_ops_params_t *dops = &params->rp_u.digest_params;

		dops->do_mech.cm_type = dops->do_framework_mechtype;
		*mech1 = &dops->do_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DIGEST :
		    CRYPTO_FG_DIGEST_ATOMIC;
		break;
	}

	case KCF_OG_MAC: {
		kcf_mac_ops_params_t *mops = &params->rp_u.mac_params;

		mops->mo_mech.cm_type = mops->mo_framework_mechtype;
		*mech1 = &mops->mo_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC :
		    CRYPTO_FG_MAC_ATOMIC;
		break;
	}

	case KCF_OG_SIGN: {
		kcf_sign_ops_params_t *sops = &params->rp_u.sign_params;

		sops->so_mech.cm_type = sops->so_framework_mechtype;
		*mech1 = &sops->so_mech;
		switch (optype) {
		case KCF_OP_INIT:
			*fg = CRYPTO_FG_SIGN;
			break;
		case KCF_OP_ATOMIC:
			*fg = CRYPTO_FG_SIGN_ATOMIC;
			break;
		default:
			ASSERT(optype == KCF_OP_SIGN_RECOVER_ATOMIC);
			*fg = CRYPTO_FG_SIGN_RECOVER_ATOMIC;
		}
		break;
	}

	case KCF_OG_VERIFY: {
		kcf_verify_ops_params_t *vops = &params->rp_u.verify_params;

		vops->vo_mech.cm_type = vops->vo_framework_mechtype;
		*mech1 = &vops->vo_mech;
		switch (optype) {
		case KCF_OP_INIT:
			*fg = CRYPTO_FG_VERIFY;
			break;
		case KCF_OP_ATOMIC:
			*fg = CRYPTO_FG_VERIFY_ATOMIC;
			break;
		default:
			ASSERT(optype == KCF_OP_VERIFY_RECOVER_ATOMIC);
			*fg = CRYPTO_FG_VERIFY_RECOVER_ATOMIC;
		}
		break;
	}

	case KCF_OG_ENCRYPT: {
		kcf_encrypt_ops_params_t *eops = &params->rp_u.encrypt_params;

		eops->eo_mech.cm_type = eops->eo_framework_mechtype;
		*mech1 = &eops->eo_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT :
		    CRYPTO_FG_ENCRYPT_ATOMIC;
		break;
	}

	case KCF_OG_DECRYPT: {
		kcf_decrypt_ops_params_t *dcrops = &params->rp_u.decrypt_params;

		dcrops->dop_mech.cm_type = dcrops->dop_framework_mechtype;
		*mech1 = &dcrops->dop_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DECRYPT :
		    CRYPTO_FG_DECRYPT_ATOMIC;
		break;
	}

	case KCF_OG_ENCRYPT_MAC: {
		kcf_encrypt_mac_ops_params_t *eops =
		    &params->rp_u.encrypt_mac_params;

		eops->em_encr_mech.cm_type = eops->em_framework_encr_mechtype;
		*mech1 = &eops->em_encr_mech;
		eops->em_mac_mech.cm_type = eops->em_framework_mac_mechtype;
		*mech2 = &eops->em_mac_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT_MAC :
		    CRYPTO_FG_ENCRYPT_MAC_ATOMIC;
		break;
	}

	case KCF_OG_MAC_DECRYPT: {
		kcf_mac_decrypt_ops_params_t *dops =
		    &params->rp_u.mac_decrypt_params;

		dops->md_mac_mech.cm_type = dops->md_framework_mac_mechtype;
		*mech1 = &dops->md_mac_mech;
		dops->md_decr_mech.cm_type = dops->md_framework_decr_mechtype;
		*mech2 = &dops->md_decr_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC_DECRYPT :
		    CRYPTO_FG_MAC_DECRYPT_ATOMIC;
		break;
	}

	default:
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * This routine is called when a request to a provider has failed
 * with a recoverable error. This routine tries to find another provider
 * and dispatches the request to the new provider, if one is available.
 * We reuse the request structure.
 *
 * A return value of NULL from kcf_get_mech_provider() indicates
 * we have tried the last provider.
 */
static int
kcf_resubmit_request(kcf_areq_node_t *areq)
{
	int error = CRYPTO_FAILED;
	kcf_context_t *ictx;
	kcf_provider_desc_t *old_pd;
	kcf_provider_desc_t *new_pd;
	crypto_mechanism_t *mech1 = NULL, *mech2 = NULL;
	crypto_mech_type_t prov_mt1, prov_mt2;
	crypto_func_group_t fg;

	if (!can_resubmit(areq, &mech1, &mech2, &fg))
		return (error);

	old_pd = areq->an_provider;
	/*
	 * Add old_pd to the list of providers already tried.
	 * We release the new hold on old_pd in kcf_free_triedlist().
	 */
	if (kcf_insert_triedlist(&areq->an_tried_plist, old_pd,
	    KM_NOSLEEP | KCF_HOLD_PROV) == NULL)
		return (error);

	if (mech1 && !mech2) {
		new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, NULL,
		    &error, areq->an_tried_plist, fg, 0);
	} else {
		ASSERT(mech1 != NULL && mech2 != NULL);

		new_pd = kcf_get_dual_provider(mech1, NULL, mech2, NULL,
		    NULL, &prov_mt1,
		    &prov_mt2, &error, areq->an_tried_plist, fg, fg, 0);
	}

	if (new_pd == NULL)
		return (error);

	/*
	 * We reuse the old context by resetting provider specific
	 * fields in it.
	 */
	if ((ictx = areq->an_context) != NULL) {
		crypto_ctx_t *ctx;

		ASSERT(old_pd == ictx->kc_prov_desc);
		KCF_PROV_REFRELE(ictx->kc_prov_desc);
		KCF_PROV_REFHOLD(new_pd);
		ictx->kc_prov_desc = new_pd;

		ctx = &ictx->kc_glbl_ctx;
		ctx->cc_provider = new_pd->pd_prov_handle;
		ctx->cc_session = new_pd->pd_sid;
		ctx->cc_provider_private = NULL;
	}

	/* We reuse areq by resetting the provider and context fields. */
	KCF_PROV_REFRELE(old_pd);
	KCF_PROV_REFHOLD(new_pd);
	areq->an_provider = new_pd;
	mutex_enter(&areq->an_lock);
	areq->an_state = REQ_WAITING;
	mutex_exit(&areq->an_lock);

	switch (new_pd->pd_prov_type) {
	case CRYPTO_SW_PROVIDER:
		error = kcf_disp_sw_request(areq);
		break;

	case CRYPTO_HW_PROVIDER: {
		taskq_t *taskq = new_pd->pd_taskq;

		if (taskq_dispatch(taskq, process_req_hwp, areq, TQ_NOSLEEP) ==
		    (taskqid_t)0) {
			error = CRYPTO_HOST_MEMORY;
		} else {
			error = CRYPTO_QUEUED;
		}

		break;
	}
	}

	KCF_PROV_REFRELE(new_pd);
	return (error);
}

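/*
 * A taskq's pending list is circular and headed by tq_task, so the queue
 * is empty when the head points back to itself. This is an unlocked peek;
 * callers treat the result as advisory.
 */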
#define	EMPTY_TASKQ(tq)	((tq)->tq_task.tqent_next == &(tq)->tq_task)

/*
 * Routine called by both ioctl and k-api. The consumer should
 * bundle the parameters into a kcf_req_params_t structure. A bunch
 * of macros are available in ops_impl.h for this bundling. They are:
 *
 * 	KCF_WRAP_DIGEST_OPS_PARAMS()
 *	KCF_WRAP_MAC_OPS_PARAMS()
 *	KCF_WRAP_ENCRYPT_OPS_PARAMS()
 *	KCF_WRAP_DECRYPT_OPS_PARAMS() ... etc.
 *
 * It is the caller's responsibility to free the ctx argument when
 * appropriate. See the KCF_CONTEXT_COND_RELEASE macro for details.
 */
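/*
 * A hedged usage sketch (not part of this file): a consumer doing a
 * synchronous atomic digest might bundle and submit roughly as below,
 * assuming the KCF_WRAP_DIGEST_OPS_PARAMS() argument order from
 * ops_impl.h (params, op type, session id, mechanism, key, data, digest):
 *
 *	kcf_req_params_t params;
 *
 *	KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC, pd->pd_sid,
 *	    mech, NULL, data, digest);
 *	error = kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
 *
 * A NULL crq selects the synchronous paths below; an async consumer
 * passes a crypto_call_req_t with its callback and, optionally,
 * CRYPTO_ALWAYS_QUEUE set in cr_flag.
 */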
int
kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
    crypto_call_req_t *crq, kcf_req_params_t *params, boolean_t cont)
{
	int error;
	kcf_areq_node_t *areq;
	kcf_sreq_node_t *sreq;
	kcf_context_t *kcf_ctx;
	taskq_t *taskq;
	kcf_prov_cpu_t *mp;

	kcf_ctx = ctx ? (kcf_context_t *)ctx->cc_framework_private : NULL;

	/* Synchronous cases */
	if (crq == NULL) {
		switch (pd->pd_prov_type) {
		case CRYPTO_SW_PROVIDER:
			error = common_submit_request(pd, ctx, params,
			    KCF_RHNDL(KM_SLEEP));
			break;

		case CRYPTO_HW_PROVIDER:
			taskq = pd->pd_taskq;

			/*
			 * Special case for CRYPTO_SYNCHRONOUS providers that
			 * never return a CRYPTO_QUEUED error. We skip any
			 * request allocation and call the SPI directly.
			 */
			if ((pd->pd_flags & CRYPTO_SYNCHRONOUS) &&
			    EMPTY_TASKQ(taskq)) {
				mp = &(pd->pd_percpu_bins[CPU_SEQID]);
				KCF_PROV_JOB_HOLD(mp);

				if (pd->pd_state == KCF_PROV_READY) {
					error = common_submit_request(pd, ctx,
					    params, KCF_RHNDL(KM_SLEEP));
					KCF_PROV_JOB_RELE(mp);
					ASSERT(error != CRYPTO_QUEUED);
					break;
				}
				KCF_PROV_JOB_RELE(mp);
			}

			sreq = kmem_cache_alloc(kcf_sreq_cache, KM_SLEEP);
			sreq->sn_state = REQ_ALLOCATED;
			sreq->sn_rv = CRYPTO_FAILED;
			sreq->sn_params = params;

			/*
			 * Note that we do not need to hold the context
			 * for synchronous case as the context will never
			 * become invalid underneath us. We do not need to hold
			 * the provider here either as the caller has a hold.
			 */
			sreq->sn_context = kcf_ctx;
			ASSERT(KCF_PROV_REFHELD(pd));
			sreq->sn_provider = pd;

			ASSERT(taskq != NULL);
			/*
			 * Call the SPI directly if the taskq is empty and the
			 * provider is not busy, else dispatch to the taskq.
			 * Calling directly is fine as this is the synchronous
			 * case. This is unlike the asynchronous case where we
			 * must always dispatch to the taskq.
			 */
			if (EMPTY_TASKQ(taskq) &&
			    pd->pd_state == KCF_PROV_READY) {
				process_req_hwp(sreq);
			} else {
				/*
				 * We cannot tell from the taskq_dispatch()
				 * return value whether we exceeded maxalloc.
				 * Hence the check here. Since we are allowed
				 * to wait in the synchronous case, we wait
				 * for the taskq to become empty.
				 */
				if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
					taskq_wait(taskq);
				}

				(void) taskq_dispatch(taskq, process_req_hwp,
				    sreq, TQ_SLEEP);
			}

			/*
			 * Wait for the notification to arrive,
			 * if the operation is not done yet.
			 * Bug# 4722589 will make the wait a cv_wait_sig().
			 */
			mutex_enter(&sreq->sn_lock);
			while (sreq->sn_state < REQ_DONE)
				cv_wait(&sreq->sn_cv, &sreq->sn_lock);
			mutex_exit(&sreq->sn_lock);

			error = sreq->sn_rv;
			kmem_cache_free(kcf_sreq_cache, sreq);

			break;

		default:
			error = CRYPTO_FAILED;
			break;
		}

	} else {	/* Asynchronous cases */
		switch (pd->pd_prov_type) {
		case CRYPTO_SW_PROVIDER:
			if (!(crq->cr_flag & CRYPTO_ALWAYS_QUEUE)) {
				/*
				 * This case has less overhead since there is
				 * no switching of context.
				 */
				error = common_submit_request(pd, ctx, params,
				    KCF_RHNDL(KM_NOSLEEP));
			} else {
				/*
				 * CRYPTO_ALWAYS_QUEUE is set. We need to
				 * queue the request and return.
				 */
				areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
				    params, cont);
				if (areq == NULL)
					error = CRYPTO_HOST_MEMORY;
				else {
					if (!(crq->cr_flag
					    & CRYPTO_SKIP_REQID)) {
					/*
					 * Set the request handle. This handle
					 * is used for any crypto_cancel_req(9f)
					 * calls from the consumer. We have to
					 * do this before dispatching the
					 * request.
					 */
					crq->cr_reqid = kcf_reqid_insert(areq);
					}

					error = kcf_disp_sw_request(areq);
					/*
					 * If there was an error processing
					 * this request, remove the handle
					 * and release the request structure.
					 */
					if (error != CRYPTO_QUEUED) {
						if (!(crq->cr_flag
						    & CRYPTO_SKIP_REQID))
							kcf_reqid_delete(areq);
						KCF_AREQ_REFRELE(areq);
					}
				}
			}
			break;

		case CRYPTO_HW_PROVIDER:
			/*
			 * We need to queue the request and return.
			 */
			areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params,
			    cont);
			if (areq == NULL) {
				error = CRYPTO_HOST_MEMORY;
				goto done;
			}

			taskq = pd->pd_taskq;
			ASSERT(taskq != NULL);
			/*
			 * We cannot tell from the taskq_dispatch() return
			 * value whether we exceeded maxalloc. Hence the
			 * check here.
			 */
			if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
				error = CRYPTO_BUSY;
				KCF_AREQ_REFRELE(areq);
				goto done;
			}

			if (!(crq->cr_flag & CRYPTO_SKIP_REQID)) {
			/*
			 * Set the request handle. This handle is used
			 * for any crypto_cancel_req(9f) calls from the
			 * consumer. We have to do this before dispatching
			 * the request.
			 */
			crq->cr_reqid = kcf_reqid_insert(areq);
			}

			if (taskq_dispatch(taskq,
			    process_req_hwp, areq, TQ_NOSLEEP) ==
			    (taskqid_t)0) {
				error = CRYPTO_HOST_MEMORY;
				if (!(crq->cr_flag & CRYPTO_SKIP_REQID))
					kcf_reqid_delete(areq);
				KCF_AREQ_REFRELE(areq);
			} else {
				error = CRYPTO_QUEUED;
			}
			break;

		default:
			error = CRYPTO_FAILED;
			break;
		}
	}

done:
	return (error);
}

/*
 * We're done with this framework context, so free it. Note that freeing
 * the framework context (kcf_context) frees the global context (crypto_ctx).
 *
 * The provider is responsible for freeing the provider-private context
 * after a final or single operation and resetting the cc_provider_private
 * field to NULL. It should do this before it notifies the framework of the
 * completion. We still need to call KCF_PROV_FREE_CONTEXT to handle cases
 * like crypto_cancel_ctx(9f).
 */
void
kcf_free_context(kcf_context_t *kcf_ctx)
{
	kcf_provider_desc_t *pd = kcf_ctx->kc_prov_desc;
	crypto_ctx_t *gctx = &kcf_ctx->kc_glbl_ctx;
	kcf_context_t *kcf_secondctx = kcf_ctx->kc_secondctx;
	kcf_prov_cpu_t *mp;

	/* Release the second context, if any */

	if (kcf_secondctx != NULL)
		KCF_CONTEXT_REFRELE(kcf_secondctx);

	if (gctx->cc_provider_private != NULL) {
		mutex_enter(&pd->pd_lock);
		if (!KCF_IS_PROV_REMOVED(pd)) {
			/*
			 * Increment the provider's internal refcnt so it
			 * doesn't unregister from the framework while
			 * we're calling the entry point.
			 */
			mp = &(pd->pd_percpu_bins[CPU_SEQID]);
			KCF_PROV_JOB_HOLD(mp);
			mutex_exit(&pd->pd_lock);
			(void) KCF_PROV_FREE_CONTEXT(pd, gctx);
			KCF_PROV_JOB_RELE(mp);
		} else {
			mutex_exit(&pd->pd_lock);
		}
	}

	/* kcf_ctx->kc_prov_desc has a hold on pd */
	KCF_PROV_REFRELE(kcf_ctx->kc_prov_desc);

	/* check if this context is shared with a software provider */
	if ((gctx->cc_flags & CRYPTO_INIT_OPSTATE) &&
	    kcf_ctx->kc_sw_prov_desc != NULL) {
		KCF_PROV_REFRELE(kcf_ctx->kc_sw_prov_desc);
	}

	kmem_cache_free(kcf_context_cache, kcf_ctx);
}

/*
 * Free the request after releasing all the holds.
 */
void
kcf_free_req(kcf_areq_node_t *areq)
{
	KCF_PROV_REFRELE(areq->an_provider);
	if (areq->an_context != NULL)
		KCF_CONTEXT_REFRELE(areq->an_context);

	if (areq->an_tried_plist != NULL)
		kcf_free_triedlist(areq->an_tried_plist);
	kmem_cache_free(kcf_areq_cache, areq);
}

/*
 * Utility routine to remove a request from the chain of requests
 * hanging off a context.
 */
void
kcf_removereq_in_ctxchain(kcf_context_t *ictx, kcf_areq_node_t *areq)
{
	kcf_areq_node_t *cur, *prev;

	/*
	 * Get context lock, search for areq in the chain and remove it.
	 */
	ASSERT(ictx != NULL);
	mutex_enter(&ictx->kc_in_use_lock);
	prev = cur = ictx->kc_req_chain_first;

	while (cur != NULL) {
		if (cur == areq) {
			if (prev == cur) {
				if ((ictx->kc_req_chain_first =
				    cur->an_ctxchain_next) == NULL)
					ictx->kc_req_chain_last = NULL;
			} else {
				if (cur == ictx->kc_req_chain_last)
					ictx->kc_req_chain_last = prev;
				prev->an_ctxchain_next = cur->an_ctxchain_next;
			}

			break;
		}
		prev = cur;
		cur = cur->an_ctxchain_next;
	}
	mutex_exit(&ictx->kc_in_use_lock);
}

/*
 * Remove the specified node from the global software queue.
 *
 * The caller must hold the queue lock and request lock (an_lock).
 */
void
kcf_remove_node(kcf_areq_node_t *node)
{
	kcf_areq_node_t *nextp = node->an_next;
	kcf_areq_node_t *prevp = node->an_prev;

	ASSERT(mutex_owned(&gswq->gs_lock));

	if (nextp != NULL)
		nextp->an_prev = prevp;
	else
		gswq->gs_last = prevp;

	if (prevp != NULL)
		prevp->an_next = nextp;
	else
		gswq->gs_first = nextp;

	ASSERT(mutex_owned(&node->an_lock));
	node->an_state = REQ_CANCELED;
}

/*
 * Remove and return the first node in the global software queue.
 *
 * The caller must hold the queue lock.
 */
static kcf_areq_node_t *
kcf_dequeue(void)
{
	kcf_areq_node_t *tnode = NULL;

	ASSERT(mutex_owned(&gswq->gs_lock));
	if ((tnode = gswq->gs_first) == NULL) {
		return (NULL);
	} else {
		ASSERT(gswq->gs_first->an_prev == NULL);
		gswq->gs_first = tnode->an_next;
		if (tnode->an_next == NULL)
			gswq->gs_last = NULL;
		else
			tnode->an_next->an_prev = NULL;
	}

	gswq->gs_njobs--;
	return (tnode);
}

/*
 * Add the request node to the end of the global software queue.
 *
 * The caller should not hold the queue lock. Returns 0 if the
 * request is successfully queued. Returns CRYPTO_BUSY if the limit
 * on the number of jobs is exceeded.
 */
static int
kcf_enqueue(kcf_areq_node_t *node)
{
	kcf_areq_node_t *tnode;

	mutex_enter(&gswq->gs_lock);

	if (gswq->gs_njobs >= gswq->gs_maxjobs) {
		mutex_exit(&gswq->gs_lock);
		return (CRYPTO_BUSY);
	}

	if (gswq->gs_last == NULL) {
		gswq->gs_first = gswq->gs_last = node;
	} else {
		ASSERT(gswq->gs_last->an_next == NULL);
		tnode = gswq->gs_last;
		tnode->an_next = node;
		gswq->gs_last = node;
		node->an_prev = tnode;
	}

	gswq->gs_njobs++;

	/* an_lock not needed here as we hold gs_lock */
	node->an_state = REQ_WAITING;

	mutex_exit(&gswq->gs_lock);

	return (0);
}

/*
 * Function run by a thread from kcfpool to work on the global software queue.
 */
void
kcfpool_svc(void *arg)
{
	_NOTE(ARGUNUSED(arg));
	int error = 0;
	clock_t rv;
	clock_t timeout_val = drv_usectohz(kcf_idlethr_timeout);
	kcf_areq_node_t *req;
	kcf_context_t *ictx;
	kcf_provider_desc_t *pd;

	KCF_ATOMIC_INCR(kcfpool->kp_threads);

	for (;;) {
		mutex_enter(&gswq->gs_lock);

		while ((req = kcf_dequeue()) == NULL) {
			KCF_ATOMIC_INCR(kcfpool->kp_idlethreads);
			rv = cv_reltimedwait(&gswq->gs_cv,
			    &gswq->gs_lock, timeout_val, TR_CLOCK_TICK);
			KCF_ATOMIC_DECR(kcfpool->kp_idlethreads);

			switch (rv) {
			case 0:
			case -1:
				/*
				 * Woke up with no work to do. Check
				 * if this thread should exit. We keep
				 * at least kcf_minthreads.
				 */
				if (kcfpool->kp_threads > kcf_minthreads) {
					KCF_ATOMIC_DECR(kcfpool->kp_threads);
					mutex_exit(&gswq->gs_lock);

					/*
					 * lwp_exit() assumes it is called
					 * with the proc lock held.  But the
					 * first thing it does is drop it.
					 * This ensures that lwp does not
					 * exit before lwp_create is done
					 * with it.
					 */
					mutex_enter(&curproc->p_lock);
					lwp_exit();	/* does not return */
				}

				/* Resume the wait for work. */
				break;

			default:
				/*
				 * We are signaled to work on the queue.
				 */
				break;
			}
		}

		mutex_exit(&gswq->gs_lock);

		ictx = req->an_context;
		if (ictx == NULL) {	/* Context-less operation */
			pd = req->an_provider;
			error = common_submit_request(pd, NULL,
			    &req->an_params, req);
			kcf_aop_done(req, error);
			continue;
		}

		/*
		 * We check if we can work on the request now.
		 * Solaris does not guarantee any order on how the threads
		 * are scheduled or how the waiters on a mutex are chosen.
		 * So, we need to maintain our own order.
		 *
		 * is_my_turn is set to B_TRUE initially for a request when
		 * it is enqueued and there are no other requests
		 * for that context.  Note that a thread sleeping on
		 * an_turn_cv is not counted as an idle thread. This is
		 * because we define an idle thread as one that sleeps on the
		 * global queue waiting for new requests.
		 */
		mutex_enter(&req->an_lock);
		while (req->an_is_my_turn == B_FALSE) {
			KCF_ATOMIC_INCR(kcfpool->kp_blockedthreads);
			cv_wait(&req->an_turn_cv, &req->an_lock);
			KCF_ATOMIC_DECR(kcfpool->kp_blockedthreads);
		}

		req->an_state = REQ_INPROGRESS;
		mutex_exit(&req->an_lock);

		pd = ictx->kc_prov_desc;
		ASSERT(pd == req->an_provider);
		error = common_submit_request(pd, &ictx->kc_glbl_ctx,
		    &req->an_params, req);

		kcf_aop_done(req, error);
	}
}

/*
 * kmem_cache_alloc constructor for sync request structure.
 */
/* ARGSUSED */
static int
kcf_sreq_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;

	sreq->sn_type = CRYPTO_SYNCH;
	cv_init(&sreq->sn_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&sreq->sn_lock, NULL, MUTEX_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
kcf_sreq_cache_destructor(void *buf, void *cdrarg)
{
	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;

	mutex_destroy(&sreq->sn_lock);
	cv_destroy(&sreq->sn_cv);
}

/*
 * kmem_cache_alloc constructor for async request structure.
 */
/* ARGSUSED */
static int
kcf_areq_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;

	areq->an_type = CRYPTO_ASYNCH;
	areq->an_refcnt = 0;
	mutex_init(&areq->an_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&areq->an_done, NULL, CV_DEFAULT, NULL);
	cv_init(&areq->an_turn_cv, NULL, CV_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
kcf_areq_cache_destructor(void *buf, void *cdrarg)
{
	kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;

	ASSERT(areq->an_refcnt == 0);
	mutex_destroy(&areq->an_lock);
	cv_destroy(&areq->an_done);
	cv_destroy(&areq->an_turn_cv);
}

/*
 * kmem_cache_alloc constructor for kcf_context structure.
 */
/* ARGSUSED */
static int
kcf_context_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	kcf_context_t *kctx = (kcf_context_t *)buf;

	kctx->kc_refcnt = 0;
	mutex_init(&kctx->kc_in_use_lock, NULL, MUTEX_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
kcf_context_cache_destructor(void *buf, void *cdrarg)
{
	kcf_context_t *kctx = (kcf_context_t *)buf;

	ASSERT(kctx->kc_refcnt == 0);
	mutex_destroy(&kctx->kc_in_use_lock);
}

/*
 * Creates and initializes all the structures needed by the framework.
 */
void
kcf_sched_init(void)
{
	int i;
	kcf_reqid_table_t *rt;

	/*
	 * Create all the kmem caches needed by the framework. We set the
	 * align argument to 64 so that slabs are aligned on 64-byte
	 * boundaries and the objects (cache_chunksize) are 64-byte
	 * multiples. This helps to avoid false sharing, as 64 bytes is
	 * the size of the CPU cache line.
	 */
	kcf_sreq_cache = kmem_cache_create("kcf_sreq_cache",
	    sizeof (struct kcf_sreq_node), 64, kcf_sreq_cache_constructor,
	    kcf_sreq_cache_destructor, NULL, NULL, NULL, 0);

	kcf_areq_cache = kmem_cache_create("kcf_areq_cache",
	    sizeof (struct kcf_areq_node), 64, kcf_areq_cache_constructor,
	    kcf_areq_cache_destructor, NULL, NULL, NULL, 0);

	kcf_context_cache = kmem_cache_create("kcf_context_cache",
	    sizeof (struct kcf_context), 64, kcf_context_cache_constructor,
	    kcf_context_cache_destructor, NULL, NULL, NULL, 0);

	gswq = kmem_alloc(sizeof (kcf_global_swq_t), KM_SLEEP);

	mutex_init(&gswq->gs_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&gswq->gs_cv, NULL, CV_DEFAULT, NULL);
	gswq->gs_njobs = 0;
	gswq->gs_maxjobs = kcf_maxthreads * crypto_taskq_maxalloc;
	gswq->gs_first = gswq->gs_last = NULL;

	/* Initialize the global reqid table */
	for (i = 0; i < REQID_TABLES; i++) {
		rt = kmem_zalloc(sizeof (kcf_reqid_table_t), KM_SLEEP);
		kcf_reqid_table[i] = rt;
		mutex_init(&rt->rt_lock, NULL, MUTEX_DEFAULT, NULL);
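		/*
		 * Seed the counter with the table index. kcf_reqid_insert()
		 * preserves these low bits, so (id & REQID_TABLE_MASK)
		 * always recovers the owning table.
		 */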
		rt->rt_curid = i;
	}

	/* Allocate and initialize the thread pool */
	kcfpool_alloc();

	/* Initialize the event notification list variables */
	mutex_init(&ntfy_list_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ntfy_list_cv, NULL, CV_DEFAULT, NULL);

	/* Initialize the crypto_bufcall list variables */
	mutex_init(&cbuf_list_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&cbuf_list_cv, NULL, CV_DEFAULT, NULL);

	/* Create the kcf kstat */
	kcf_misc_kstat = kstat_create("kcf", 0, "framework_stats", "crypto",
	    KSTAT_TYPE_NAMED, sizeof (kcf_stats_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (kcf_misc_kstat != NULL) {
		kcf_misc_kstat->ks_data = &kcf_ksdata;
		kcf_misc_kstat->ks_update = kcf_misc_kstat_update;
		kstat_install(kcf_misc_kstat);
	}
}

/*
 * This routine should only be called by drv/cryptoadm.
 *
 * kcf_sched_running flag isn't protected by a lock. But, we are safe because
 * the first thread ("cryptoadm refresh") calling this routine during
 * boot time completes before any other thread that can call this routine.
 */
void
kcf_sched_start(void)
{
	if (kcf_sched_running)
		return;

	/* Start the background processing thread. */
	(void) thread_create(NULL, 0, &crypto_bufcall_service, 0, 0, &p0,
	    TS_RUN, minclsyspri);

	kcf_sched_running = B_TRUE;
}

/*
 * Signal the waiting sync client.
 */
void
kcf_sop_done(kcf_sreq_node_t *sreq, int error)
{
	mutex_enter(&sreq->sn_lock);
	sreq->sn_state = REQ_DONE;
	sreq->sn_rv = error;
	cv_signal(&sreq->sn_cv);
	mutex_exit(&sreq->sn_lock);
}

/*
 * Callback the async client with the operation status.
 * We free the async request node and possibly the context.
 * We also handle any chain of requests hanging off the context.
 */
void
kcf_aop_done(kcf_areq_node_t *areq, int error)
{
	kcf_op_type_t optype;
	boolean_t skip_notify = B_FALSE;
	kcf_context_t *ictx;
	kcf_areq_node_t *nextreq;

	/*
	 * Handle recoverable errors. This has to be done first
	 * before doing anything else in this routine so that
	 * we do not change the state of the request.
	 */
	if (error != CRYPTO_SUCCESS && IS_RECOVERABLE(error)) {
		/*
		 * We try another provider, if one is available. Else
		 * we continue with the failure notification to the
		 * client.
		 */
		if (kcf_resubmit_request(areq) == CRYPTO_QUEUED)
			return;
	}

	mutex_enter(&areq->an_lock);
	areq->an_state = REQ_DONE;
	mutex_exit(&areq->an_lock);

	optype = (&areq->an_params)->rp_optype;
	if ((ictx = areq->an_context) != NULL) {
		/*
		 * After a request is removed from the request queue, it
		 * still stays on the chain of requests hanging off its
		 * context structure. It needs to be removed from this
		 * chain at this point.
		 */
		mutex_enter(&ictx->kc_in_use_lock);
		nextreq = areq->an_ctxchain_next;
		if (nextreq != NULL) {
			mutex_enter(&nextreq->an_lock);
			nextreq->an_is_my_turn = B_TRUE;
			cv_signal(&nextreq->an_turn_cv);
			mutex_exit(&nextreq->an_lock);
		}

		ictx->kc_req_chain_first = nextreq;
		if (nextreq == NULL)
			ictx->kc_req_chain_last = NULL;
		mutex_exit(&ictx->kc_in_use_lock);

		if (IS_SINGLE_OP(optype) || IS_FINAL_OP(optype)) {
			ASSERT(nextreq == NULL);
			KCF_CONTEXT_REFRELE(ictx);
		} else if (error != CRYPTO_SUCCESS && IS_INIT_OP(optype)) {
		/*
		 * NOTE - We do not release the context in case of update
		 * operations. We require the consumer to free it explicitly,
		 * in case it wants to abandon an update operation. This is done
		 * as there may be mechanisms in ECB mode that can continue
		 * even if an operation on a block fails.
		 */
			KCF_CONTEXT_REFRELE(ictx);
		}
	}

	/* Deal with the internal continuation to this request first */

	if (areq->an_isdual) {
		kcf_dual_req_t *next_arg;
		next_arg = (kcf_dual_req_t *)areq->an_reqarg.cr_callback_arg;
		next_arg->kr_areq = areq;
		KCF_AREQ_REFHOLD(areq);
		areq->an_isdual = B_FALSE;

		NOTIFY_CLIENT(areq, error);
		return;
	}

	/*
	 * If CRYPTO_NOTIFY_OPDONE flag is set, we should notify
	 * always. If this flag is clear, we skip the notification
	 * provided there are no errors.  We check this flag for only
	 * init or update operations. It is ignored for single, final or
	 * atomic operations.
	 */
	skip_notify = (IS_UPDATE_OP(optype) || IS_INIT_OP(optype)) &&
	    (!(areq->an_reqarg.cr_flag & CRYPTO_NOTIFY_OPDONE)) &&
	    (error == CRYPTO_SUCCESS);

	if (!skip_notify) {
		NOTIFY_CLIENT(areq, error);
	}

	if (!(areq->an_reqarg.cr_flag & CRYPTO_SKIP_REQID))
		kcf_reqid_delete(areq);

	KCF_AREQ_REFRELE(areq);
}

/*
 * kcfpool thread spawner.  This runs as a process that never exits.
 * It's a process so that the threads it owns can be manipulated via priocntl.
 */
static void
kcfpoold(void *arg)
{
	callb_cpr_t	cprinfo;
	user_t		*pu = PTOU(curproc);
	int		cnt;
	clock_t		timeout_val = drv_usectohz(kcf_idlethr_timeout);
	_NOTE(ARGUNUSED(arg));

	CALLB_CPR_INIT(&cprinfo, &kcfpool->kp_lock,
	    callb_generic_cpr, "kcfpool");

	/* make our process "kcfpoold" */
	(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs), "kcfpoold");
	(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));

	mutex_enter(&kcfpool->kp_lock);

	/*
	 * Go to sleep, waiting for the signaled flag.  Note that as
	 * we always do the same thing, and it's always idempotent, we
	 * don't even need to have a real condition to check against.
	 */
	for (;;) {
		int rv;

		CALLB_CPR_SAFE_BEGIN(&cprinfo);
		rv = cv_reltimedwait(&kcfpool->kp_cv,
		    &kcfpool->kp_lock, timeout_val, TR_CLOCK_TICK);
		CALLB_CPR_SAFE_END(&cprinfo, &kcfpool->kp_lock);

		switch (rv) {
		case -1:
			/* Timed out. Recalculate the min/max threads */
			compute_min_max_threads();
			break;

		default:
			/* Someone may be looking for a worker thread */
			break;
		}

		/*
		 * We keep the number of running threads to be at
		 * kcf_minthreads to reduce gs_lock contention.
		 */
		cnt = kcf_minthreads -
		    (kcfpool->kp_threads - kcfpool->kp_blockedthreads);
		if (cnt > 0) {
			/*
			 * The following ensures the number of threads in the
			 * pool does not exceed kcf_maxthreads.
			 */
			cnt = min(cnt, kcf_maxthreads - kcfpool->kp_threads);
		}

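		/*
		 * cnt can be zero or negative when enough threads already
		 * exist; the loop below then creates none.
		 */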
		for (int i = 0; i < cnt; i++) {
			(void) lwp_kernel_create(curproc,
			    kcfpool_svc, NULL, TS_RUN, curthread->t_pri);
		}
	}
}

/*
 * Allocate the thread pool and initialize all the fields.
 */
static void
kcfpool_alloc(void)
{
	kcfpool = kmem_alloc(sizeof (kcf_pool_t), KM_SLEEP);

	kcfpool->kp_threads = kcfpool->kp_idlethreads = 0;
	kcfpool->kp_blockedthreads = 0;

	mutex_init(&kcfpool->kp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&kcfpool->kp_cv, NULL, CV_DEFAULT, NULL);

	kcf_idlethr_timeout = KCF_DEFAULT_THRTIMEOUT;

	/*
	 * Create the daemon thread.
	 */
	if (newproc(kcfpoold, NULL, syscid, minclsyspri,
	    NULL, 0) != 0) {
		cmn_err(CE_PANIC, "unable to fork kcfpoold()");
	}
}

/*
 * This routine introduces a locking order for gswq->gs_lock followed
 * by cpu_lock.
 * This means that no consumer of the k-api should hold cpu_lock when calling
 * k-api routines.
 */
static void
compute_min_max_threads(void)
{
	mutex_enter(&gswq->gs_lock);
	mutex_enter(&cpu_lock);
	kcf_minthreads = curthread->t_cpupart->cp_ncpus;
	mutex_exit(&cpu_lock);
	kcf_maxthreads = kcf_thr_multiple * kcf_minthreads;
	gswq->gs_maxjobs = kcf_maxthreads * crypto_taskq_maxalloc;
	mutex_exit(&gswq->gs_lock);
}

/*
 * Insert the async request in the hash table after assigning it
 * an ID. Returns the ID.
 *
 * The caller passes the ID as an argument to the
 * crypto_cancel_req() routine later.
 */
static crypto_req_id_t
kcf_reqid_insert(kcf_areq_node_t *areq)
{
	int indx;
	crypto_req_id_t id;
	kcf_areq_node_t *headp;
	kcf_reqid_table_t *rt =
	    kcf_reqid_table[CPU->cpu_seqid & REQID_TABLE_MASK];

	mutex_enter(&rt->rt_lock);

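	/*
	 * Step the counter portion of the ID and set the constant high
	 * bit so an allocated ID is never zero (a zero ID marks a
	 * completed request; see kcf_reqid_delete()). The low bits still
	 * carry the table index seeded in kcf_sched_init().
	 */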
	rt->rt_curid = id =
	    (rt->rt_curid - REQID_COUNTER_LOW) | REQID_COUNTER_HIGH;
	SET_REQID(areq, id);
	indx = REQID_HASH(id);
	headp = areq->an_idnext = rt->rt_idhash[indx];
	areq->an_idprev = NULL;
	if (headp != NULL)
		headp->an_idprev = areq;

	rt->rt_idhash[indx] = areq;
	mutex_exit(&rt->rt_lock);

	return (id);
}

/*
 * Delete the async request from the hash table.
 */
static void
kcf_reqid_delete(kcf_areq_node_t *areq)
{
	int indx;
	kcf_areq_node_t *nextp, *prevp;
	crypto_req_id_t id = GET_REQID(areq);
	kcf_reqid_table_t *rt;

	rt = kcf_reqid_table[id & REQID_TABLE_MASK];
	indx = REQID_HASH(id);

	mutex_enter(&rt->rt_lock);

	nextp = areq->an_idnext;
	prevp = areq->an_idprev;
	if (nextp != NULL)
		nextp->an_idprev = prevp;
	if (prevp != NULL)
		prevp->an_idnext = nextp;
	else
		rt->rt_idhash[indx] = nextp;

	SET_REQID(areq, 0);
	cv_broadcast(&areq->an_done);

	mutex_exit(&rt->rt_lock);
}

/*
 * Cancel a single asynchronous request.
 *
 * We guarantee that no problems will result from calling
 * crypto_cancel_req() for a request which is either running, or
 * has already completed. We remove the request from any queues
 * if it is possible. We wait for request completion if the
 * request is dispatched to a provider.
 *
 * Calling context:
 * 	Can be called from user context only.
 *
 * NOTE: We acquire the following locks in this routine (in order):
 *	- rt_lock (kcf_reqid_table_t)
 *	- gswq->gs_lock
 *	- areq->an_lock
 *	- ictx->kc_in_use_lock (from kcf_removereq_in_ctxchain())
 *
 * This locking order MUST be maintained in code everywhere else.
 */
void
crypto_cancel_req(crypto_req_id_t id)
{
	int indx;
	kcf_areq_node_t *areq;
	kcf_provider_desc_t *pd;
	kcf_context_t *ictx;
	kcf_reqid_table_t *rt;

	rt = kcf_reqid_table[id & REQID_TABLE_MASK];
	indx = REQID_HASH(id);

	mutex_enter(&rt->rt_lock);
	for (areq = rt->rt_idhash[indx]; areq; areq = areq->an_idnext) {
	if (GET_REQID(areq) == id) {
		/*
		 * We found the request. It is either still waiting
		 * in the framework queues or running at the provider.
		 */
		pd = areq->an_provider;
		ASSERT(pd != NULL);

		switch (pd->pd_prov_type) {
		case CRYPTO_SW_PROVIDER:
			mutex_enter(&gswq->gs_lock);
			mutex_enter(&areq->an_lock);

			/* This request can be safely canceled. */
			if (areq->an_state <= REQ_WAITING) {
				/* Remove from gswq, global software queue. */
				kcf_remove_node(areq);
				if ((ictx = areq->an_context) != NULL)
					kcf_removereq_in_ctxchain(ictx, areq);

				mutex_exit(&areq->an_lock);
				mutex_exit(&gswq->gs_lock);
				mutex_exit(&rt->rt_lock);

				/* Remove areq from hash table and free it. */
				kcf_reqid_delete(areq);
				KCF_AREQ_REFRELE(areq);
				return;
			}

			mutex_exit(&areq->an_lock);
			mutex_exit(&gswq->gs_lock);
			break;

		case CRYPTO_HW_PROVIDER:
			/*
			 * There is no interface to remove an entry
			 * once it is on the taskq. So, we do not do
			 * anything for a hardware provider.
			 */
			break;
		}

		/*
		 * The request is running. Wait for the request completion
		 * to notify us.
		 */
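		/*
		 * kcf_reqid_delete() clears the ID and broadcasts an_done
		 * when the request completes, which ends this wait.
		 */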
		KCF_AREQ_REFHOLD(areq);
		while (GET_REQID(areq) == id)
			cv_wait(&areq->an_done, &rt->rt_lock);
		KCF_AREQ_REFRELE(areq);
		break;
	}
	}

	mutex_exit(&rt->rt_lock);
}

/*
 * Cancel all asynchronous requests associated with the
 * passed in crypto context and free it.
 *
 * A client SHOULD NOT call this routine after calling a crypto_*_final
 * routine. This routine is called only during intermediate operations.
 * The client should not use the crypto context after this function returns
 * since we destroy it.
 *
 * Calling context:
 * 	Can be called from user context only.
 */
void
crypto_cancel_ctx(crypto_context_t ctx)
{
	kcf_context_t *ictx;
	kcf_areq_node_t *areq;

	if (ctx == NULL)
		return;

	ictx = (kcf_context_t *)((crypto_ctx_t *)ctx)->cc_framework_private;

	mutex_enter(&ictx->kc_in_use_lock);

	/* Walk the chain and cancel each request */
	while ((areq = ictx->kc_req_chain_first) != NULL) {
		/*
		 * We have to drop the lock here as we may have
		 * to wait for request completion. We hold the
		 * request before dropping the lock though, so that it
		 * won't be freed underneath us.
		 */
		KCF_AREQ_REFHOLD(areq);
		mutex_exit(&ictx->kc_in_use_lock);

		crypto_cancel_req(GET_REQID(areq));
		KCF_AREQ_REFRELE(areq);

		mutex_enter(&ictx->kc_in_use_lock);
	}

	mutex_exit(&ictx->kc_in_use_lock);
	KCF_CONTEXT_REFRELE(ictx);
}

/*
 * Update kstats.
 */
static int
kcf_misc_kstat_update(kstat_t *ksp, int rw)
{
	kcf_stats_t *ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ks_data = ksp->ks_data;

	ks_data->ks_thrs_in_pool.value.ui32 = kcfpool->kp_threads;
	ks_data->ks_idle_thrs.value.ui32 = kcfpool->kp_idlethreads;
	ks_data->ks_minthrs.value.ui32 = kcf_minthreads;
	ks_data->ks_maxthrs.value.ui32 = kcf_maxthreads;
	ks_data->ks_swq_njobs.value.ui32 = gswq->gs_njobs;
	ks_data->ks_swq_maxjobs.value.ui32 = gswq->gs_maxjobs;
	ks_data->ks_taskq_threads.value.ui32 = crypto_taskq_threads;
	ks_data->ks_taskq_minalloc.value.ui32 = crypto_taskq_minalloc;
	ks_data->ks_taskq_maxalloc.value.ui32 = crypto_taskq_maxalloc;

	return (0);
}

/*
 * Allocate and initialize a kcf_dual_req, used for saving the arguments of
 * a dual operation or an atomic operation that has to be internally
 * simulated with multiple single steps.
 * crq determines the memory allocation flags.
 */

kcf_dual_req_t *
kcf_alloc_req(crypto_call_req_t *crq)
{
	kcf_dual_req_t *kcr;

	kcr = kmem_alloc(sizeof (kcf_dual_req_t), KCF_KMFLAG(crq));

	if (kcr == NULL)
		return (NULL);

	/* Copy the whole crypto_call_req struct, as it isn't persistent */
	if (crq != NULL)
		kcr->kr_callreq = *crq;
	else
		bzero(&(kcr->kr_callreq), sizeof (crypto_call_req_t));
	kcr->kr_areq = NULL;
	kcr->kr_saveoffset = 0;
	kcr->kr_savelen = 0;

	return (kcr);
}

/*
 * Callback routine for the next part of a simulated dual operation.
 * Schedules the next step.
 *
 * This routine can be called from interrupt context.
 */
void
kcf_next_req(void *next_req_arg, int status)
{
	kcf_dual_req_t *next_req = (kcf_dual_req_t *)next_req_arg;
	kcf_req_params_t *params = &(next_req->kr_params);
	kcf_areq_node_t *areq = next_req->kr_areq;
	int error = status;
	kcf_provider_desc_t *pd;
	crypto_dual_data_t *ct;

	/* Stop the processing if an error occurred at this step */
	if (error != CRYPTO_SUCCESS) {
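		/*
		 * The out: label sits inside this error branch so that
		 * failures detected further down can jump back to this
		 * common cleanup-and-callback path.
		 */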
out:
		areq->an_reqarg = next_req->kr_callreq;
		KCF_AREQ_REFRELE(areq);
		kmem_free(next_req, sizeof (kcf_dual_req_t));
		areq->an_isdual = B_FALSE;
		kcf_aop_done(areq, error);
		return;
	}

	switch (params->rp_opgrp) {
	case KCF_OG_MAC: {

		/*
		 * The next req is submitted with the same reqid as the
		 * first part. The consumer only got back that reqid, and
		 * should still be able to cancel the operation during its
		 * second step.
		 */
		kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
		crypto_ctx_template_t mac_tmpl;
		kcf_mech_entry_t *me;

		ct = (crypto_dual_data_t *)mops->mo_data;
		mac_tmpl = (crypto_ctx_template_t)mops->mo_templ;

		/* No expected recoverable failures, so no retry list */
		pd = kcf_get_mech_provider(mops->mo_framework_mechtype, NULL,
		    &me, &error, NULL, CRYPTO_FG_MAC_ATOMIC, ct->dd_len2);

		if (pd == NULL) {
			error = CRYPTO_MECH_NOT_SUPPORTED;
			goto out;
		}
		/* Validate the MAC context template here */
		if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
		    (mac_tmpl != NULL)) {
			kcf_ctx_template_t *ctx_mac_tmpl;

			ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;

			if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
				KCF_PROV_REFRELE(pd);
				error = CRYPTO_OLD_CTX_TEMPLATE;
				goto out;
			}
			mops->mo_templ = ctx_mac_tmpl->ct_prov_tmpl;
		}

		break;
	}
	case KCF_OG_DECRYPT: {
		kcf_decrypt_ops_params_t *dcrops =
		    &(params->rp_u.decrypt_params);

		ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
		/* No expected recoverable failures, so no retry list */
		pd = kcf_get_mech_provider(dcrops->dop_framework_mechtype,
		    NULL, NULL, &error, NULL, CRYPTO_FG_DECRYPT_ATOMIC,
		    ct->dd_len1);

		if (pd == NULL) {
			error = CRYPTO_MECH_NOT_SUPPORTED;
			goto out;
		}
		break;
	}
	}

	/* The second step uses len2 and offset2 of the dual_data */
	next_req->kr_saveoffset = ct->dd_offset1;
	next_req->kr_savelen = ct->dd_len1;
	ct->dd_offset1 = ct->dd_offset2;
	ct->dd_len1 = ct->dd_len2;

	areq->an_reqarg.cr_flag = 0;

	areq->an_reqarg.cr_callback_func = kcf_last_req;
	areq->an_reqarg.cr_callback_arg = next_req;
	areq->an_isdual = B_TRUE;

	/*
	 * We would like to call kcf_submit_request() here. But,
	 * that is not possible as that routine allocates a new
	 * kcf_areq_node_t request structure, while we need to
	 * reuse the existing request structure.
	 */
	switch (pd->pd_prov_type) {
	case CRYPTO_SW_PROVIDER:
		error = common_submit_request(pd, NULL, params,
		    KCF_RHNDL(KM_NOSLEEP));
		break;

	case CRYPTO_HW_PROVIDER: {
		kcf_provider_desc_t *old_pd;
		taskq_t *taskq = pd->pd_taskq;

		/*
		 * Set the params for the second step in the
		 * dual-ops.
		 */
		areq->an_params = *params;
		old_pd = areq->an_provider;
		KCF_PROV_REFRELE(old_pd);
		KCF_PROV_REFHOLD(pd);
		areq->an_provider = pd;

		/*
		 * Note that we have to do a taskq_dispatch()
		 * here as we may be in interrupt context.
		 */
		if (taskq_dispatch(taskq, process_req_hwp, areq,
		    TQ_NOSLEEP) == (taskqid_t)0) {
			error = CRYPTO_HOST_MEMORY;
		} else {
			error = CRYPTO_QUEUED;
		}
		break;
	}
	}

	/*
	 * We have to release the holds on the request and the provider
	 * in all cases.
	 */
	KCF_AREQ_REFRELE(areq);
	KCF_PROV_REFRELE(pd);

	if (error != CRYPTO_QUEUED) {
		/* restore, clean up, and invoke the client's callback */

		ct->dd_offset1 = next_req->kr_saveoffset;
		ct->dd_len1 = next_req->kr_savelen;
		areq->an_reqarg = next_req->kr_callreq;
		kmem_free(next_req, sizeof (kcf_dual_req_t));
		areq->an_isdual = B_FALSE;
		kcf_aop_done(areq, error);
	}
}

/*
 * Last part of an emulated dual operation.
 * Clean up and restore ...
 */
void
kcf_last_req(void *last_req_arg, int status)
{
	kcf_dual_req_t *last_req = (kcf_dual_req_t *)last_req_arg;

	kcf_req_params_t *params = &(last_req->kr_params);
	kcf_areq_node_t *areq = last_req->kr_areq;
	crypto_dual_data_t *ct;

	switch (params->rp_opgrp) {
	case KCF_OG_MAC: {
		kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);

		ct = (crypto_dual_data_t *)mops->mo_data;
		break;
	}
	case KCF_OG_DECRYPT: {
		kcf_decrypt_ops_params_t *dcrops =
		    &(params->rp_u.decrypt_params);

		ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
		break;
	}
	}
	ct->dd_offset1 = last_req->kr_saveoffset;
	ct->dd_len1 = last_req->kr_savelen;

	/* The submitter used kcf_last_req as its callback */

	if (areq == NULL) {
		crypto_call_req_t *cr = &last_req->kr_callreq;

		(*(cr->cr_callback_func))(cr->cr_callback_arg, status);
		kmem_free(last_req, sizeof (kcf_dual_req_t));
		return;
	}
	areq->an_reqarg = last_req->kr_callreq;
	KCF_AREQ_REFRELE(areq);
	kmem_free(last_req, sizeof (kcf_dual_req_t));
	areq->an_isdual = B_FALSE;
	kcf_aop_done(areq, status);
}
1959