xref: /titanic_52/usr/src/uts/common/crypto/spi/kcf_spi.c (revision 94360ae1f2b5d61a6b7fd32a528b0d0860f1f1d7)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * This file is part of the core Kernel Cryptographic Framework.
28  * It implements the SPI functions exported to cryptographic
29  * providers.
30  */
31 
32 #include <sys/ksynch.h>
33 #include <sys/cmn_err.h>
34 #include <sys/ddi.h>
35 #include <sys/sunddi.h>
36 #include <sys/modctl.h>
37 #include <sys/crypto/common.h>
38 #include <sys/crypto/impl.h>
39 #include <sys/crypto/sched_impl.h>
40 #include <sys/crypto/spi.h>
41 #include <sys/taskq.h>
42 #include <sys/disp.h>
43 #include <sys/kstat.h>
44 #include <sys/policy.h>
45 #include <sys/cpuvar.h>
46 
47 /*
48  * Thread count and minalloc/maxalloc values to be used for taskq_create().
49  */
50 int crypto_taskq_threads = CRYPTO_TASKQ_THREADS;
51 int crypto_taskq_minalloc = CRYPTO_TASKQ_MIN;
52 int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX;
53 
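/*
 * These are patchable globals. As an illustrative, hypothetical example
 * (assuming this framework module is named "kcf"), an administrator could
 * raise the hardware-provider taskq thread count from /etc/system:
 *
 *	set kcf:crypto_taskq_threads = 64
 *
 * and verify the live value with mdb -k:
 *
 *	> crypto_taskq_threads/D
 */
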
54 static void remove_provider(kcf_provider_desc_t *);
55 static void process_logical_providers(crypto_provider_info_t *,
56     kcf_provider_desc_t *);
57 static int init_prov_mechs(crypto_provider_info_t *, kcf_provider_desc_t *);
58 static int kcf_prov_kstat_update(kstat_t *, int);
59 static void undo_register_provider_extra(kcf_provider_desc_t *);
60 static void delete_kstat(kcf_provider_desc_t *);
61 
62 static kcf_prov_stats_t kcf_stats_ks_data_template = {
63 	{ "kcf_ops_total",		KSTAT_DATA_UINT64 },
64 	{ "kcf_ops_passed",		KSTAT_DATA_UINT64 },
65 	{ "kcf_ops_failed",		KSTAT_DATA_UINT64 },
66 	{ "kcf_ops_returned_busy",	KSTAT_DATA_UINT64 }
67 };
68 
69 #define	KCF_SPI_COPY_OPS(src, dst, ops) if ((src)->ops != NULL) \
70 	*((dst)->ops) = *((src)->ops);
71 
72 /*
73  * Copy an ops vector from src to dst. Used during provider registration
74  * to copy the ops vector from the provider info structure to the
75  * provider descriptor maintained by KCF.
76  * Copying the ops vector specified by the provider is needed since the
77  * framework does not require the provider info structure to be
78  * persistent.
79  */
80 static void
81 copy_ops_vector_v1(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
82 {
83 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_control_ops);
84 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
85 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_cipher_ops);
86 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mac_ops);
87 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_sign_ops);
88 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_verify_ops);
89 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_ops);
90 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_cipher_mac_ops);
91 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_random_ops);
92 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_session_ops);
93 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_object_ops);
94 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_key_ops);
95 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_provider_ops);
96 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_ctx_ops);
97 }
98 
99 static void
100 copy_ops_vector_v2(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
101 {
102 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops);
103 }
104 
105 static void
106 copy_ops_vector_v3(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
107 {
108 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_nostore_key_ops);
109 }
110 
111 /*
112  * This routine is used to add cryptographic providers to the KCF framework.
113  * Providers pass a crypto_provider_info structure to crypto_register_provider()
114  * and get back a handle.  The crypto_provider_info structure contains a
115  * list of mechanisms supported by the provider and an ops vector containing
116  * provider entry points.  Hardware providers call this routine in their attach
117  * routines.  Software providers call this routine in their _init() routine.
118  */
119 int
120 crypto_register_provider(crypto_provider_info_t *info,
121     crypto_kcf_provider_handle_t *handle)
122 {
123 	int need_verify;
124 	struct modctl *mcp;
125 	char *name;
126 	char ks_name[KSTAT_STRLEN];
127 
128 	kcf_provider_desc_t *prov_desc = NULL;
129 	int ret = CRYPTO_ARGUMENTS_BAD;
130 
131 	if (info->pi_interface_version > CRYPTO_SPI_VERSION_3)
132 		return (CRYPTO_VERSION_MISMATCH);
133 
134 	/*
135 	 * Check provider type, must be software, hardware, or logical.
136 	 */
137 	if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
138 	    info->pi_provider_type != CRYPTO_SW_PROVIDER &&
139 	    info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
140 		return (CRYPTO_ARGUMENTS_BAD);
141 
142 	/*
143 	 * Allocate and initialize a new provider descriptor. We also
144 	 * hold it and release it when done.
145 	 */
146 	prov_desc = kcf_alloc_provider_desc(info);
147 	KCF_PROV_REFHOLD(prov_desc);
148 
149 	prov_desc->pd_prov_type = info->pi_provider_type;
150 
151 	/* provider-private handle, opaque to KCF */
152 	prov_desc->pd_prov_handle = info->pi_provider_handle;
153 
154 	/* copy provider description string */
155 	if (info->pi_provider_description != NULL) {
156 		/*
157 		 * pi_provider_description is a string that can contain
158 		 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
159 		 * INCLUDING the terminating null character. A bcopy()
160 		 * is necessary here as pd_description should not have
161 		 * a null character. See comments in kcf_alloc_provider_desc()
162 		 * for details on pd_description field.
163 		 */
164 		bcopy(info->pi_provider_description, prov_desc->pd_description,
165 		    min(strlen(info->pi_provider_description),
166 		    CRYPTO_PROVIDER_DESCR_MAX_LEN));
167 	}
168 
169 	if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
170 		if (info->pi_ops_vector == NULL) {
171 			goto bail;
172 		}
173 		copy_ops_vector_v1(info->pi_ops_vector,
174 		    prov_desc->pd_ops_vector);
175 		if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
176 			copy_ops_vector_v2(info->pi_ops_vector,
177 			    prov_desc->pd_ops_vector);
178 			prov_desc->pd_flags = info->pi_flags;
179 		}
180 		if (info->pi_interface_version == CRYPTO_SPI_VERSION_3) {
181 			copy_ops_vector_v3(info->pi_ops_vector,
182 			    prov_desc->pd_ops_vector);
183 		}
184 	}
185 
186 	/* object_ops and nostore_key_ops are mutually exclusive */
187 	if (prov_desc->pd_ops_vector->co_object_ops &&
188 	    prov_desc->pd_ops_vector->co_nostore_key_ops) {
189 		goto bail;
190 	}
191 	/*
192 	 * For software providers, copy the module name and module ID.
193 	 * For hardware providers, copy the driver name and instance.
194 	 */
195 	switch (info->pi_provider_type) {
196 	case  CRYPTO_SW_PROVIDER:
197 		if (info->pi_provider_dev.pd_sw == NULL)
198 			goto bail;
199 
200 		if ((mcp = mod_getctl(info->pi_provider_dev.pd_sw)) == NULL)
201 			goto bail;
202 
203 		prov_desc->pd_module_id = mcp->mod_id;
204 		name = mcp->mod_modname;
205 		break;
206 
207 	case CRYPTO_HW_PROVIDER:
208 	case CRYPTO_LOGICAL_PROVIDER:
209 		if (info->pi_provider_dev.pd_hw == NULL)
210 			goto bail;
211 
212 		prov_desc->pd_instance =
213 		    ddi_get_instance(info->pi_provider_dev.pd_hw);
214 		name = (char *)ddi_driver_name(info->pi_provider_dev.pd_hw);
215 		break;
216 	}
217 	if (name == NULL)
218 		goto bail;
219 
220 	prov_desc->pd_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
221 	(void) strcpy(prov_desc->pd_name, name);
222 
223 	if ((prov_desc->pd_mctlp = kcf_get_modctl(info)) == NULL)
224 		goto bail;
225 
226 	/* process the mechanisms supported by the provider */
227 	if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
228 		goto bail;
229 
230 	/*
231 	 * Add provider to the providers table; this also sets the descriptor
232 	 * pd_prov_id field.
233 	 */
234 	if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
235 		undo_register_provider(prov_desc, B_FALSE);
236 		goto bail;
237 	}
238 
239 	if ((need_verify = kcf_need_signature_verification(prov_desc)) == -1) {
240 		undo_register_provider(prov_desc, B_TRUE);
241 		ret = CRYPTO_MODVERIFICATION_FAILED;
242 		goto bail;
243 	}
244 
245 	/*
246 	 * We create a taskq only for a hardware provider. The global
247 	 * software queue is used for software providers. We handle ordering
248 	 * of multi-part requests in the taskq routine. So, it is safe to
249 	 * have multiple threads for the taskq. We pass TASKQ_PREPOPULATE flag
250 	 * to keep some entries cached to improve performance.
251 	 */
252 	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
253 		prov_desc->pd_taskq = taskq_create("kcf_taskq",
254 		    crypto_taskq_threads, minclsyspri,
255 		    crypto_taskq_minalloc, crypto_taskq_maxalloc,
256 		    TASKQ_PREPOPULATE);
257 	else
258 		prov_desc->pd_taskq = NULL;
259 
260 	/* no kernel session for logical providers */
261 	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
262 		/*
263 		 * Open a session for session-oriented providers. This session
264 		 * is used for all kernel consumers. This is fine as a provider
265 		 * is required to support multiple thread access to a session.
266 		 * We can do this only after the taskq has been created as we
267 		 * do a kcf_submit_request() to open the session.
268 		 */
269 		if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
270 			kcf_req_params_t params;
271 
272 			KCF_WRAP_SESSION_OPS_PARAMS(&params,
273 			    KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
274 			    CRYPTO_USER, NULL, 0, prov_desc);
275 			ret = kcf_submit_request(prov_desc, NULL, NULL, &params,
276 			    B_FALSE);
277 
278 			if (ret != CRYPTO_SUCCESS) {
279 				undo_register_provider(prov_desc, B_TRUE);
280 				ret = CRYPTO_FAILED;
281 				goto bail;
282 			}
283 		}
284 	}
285 
286 	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
287 		/*
288 		 * Create the kstat for this provider. There is a kstat
289 		 * installed for each successfully registered provider.
290 		 * This kstat is deleted when the provider unregisters.
291 		 */
292 		if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
293 			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%s",
294 			    prov_desc->pd_name, "provider_stats");
295 		} else {
296 			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%d_%u_%s",
297 			    prov_desc->pd_name, prov_desc->pd_instance,
298 			    prov_desc->pd_prov_id, "provider_stats");
299 		}
300 
301 		prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
302 		    KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
303 		    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
304 
305 		if (prov_desc->pd_kstat != NULL) {
306 			bcopy(&kcf_stats_ks_data_template,
307 			    &prov_desc->pd_ks_data,
308 			    sizeof (kcf_stats_ks_data_template));
309 			prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
310 			KCF_PROV_REFHOLD(prov_desc);
311 			prov_desc->pd_kstat->ks_private = prov_desc;
312 			prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
313 			kstat_install(prov_desc->pd_kstat);
314 		}
315 	}
316 
317 	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
318 		process_logical_providers(info, prov_desc);
319 
320 	if (need_verify == 1) {
321 		/* kcf_verify_signature routine will release this hold */
322 		KCF_PROV_REFHOLD(prov_desc);
323 
324 		if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER) {
325 			/*
326 			 * It is not safe to make the door upcall to kcfd from
327 			 * this context since the kcfd thread could reenter
328 			 * devfs. So, we dispatch a taskq job to do the
329 			 * verification and return to the provider.
330 			 */
331 			(void) taskq_dispatch(system_taskq,
332 			    kcf_verify_signature, (void *)prov_desc, TQ_SLEEP);
333 		} else if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
334 			kcf_verify_signature(prov_desc);
335 			if (prov_desc->pd_state ==
336 			    KCF_PROV_VERIFICATION_FAILED) {
337 				undo_register_provider_extra(prov_desc);
338 				ret = CRYPTO_MODVERIFICATION_FAILED;
339 				goto bail;
340 			}
341 		}
342 	} else {
343 		mutex_enter(&prov_desc->pd_lock);
344 		prov_desc->pd_state = KCF_PROV_READY;
345 		mutex_exit(&prov_desc->pd_lock);
346 		kcf_do_notify(prov_desc, B_TRUE);
347 	}
348 
349 	*handle = prov_desc->pd_kcf_prov_handle;
350 	ret = CRYPTO_SUCCESS;
351 
352 bail:
353 	KCF_PROV_REFRELE(prov_desc);
354 	return (ret);
355 }
356 
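/*
 * Illustrative sketch (not part of this file): a minimal software provider
 * would typically call crypto_register_provider() from its _init() entry
 * point, after mod_install() succeeds. The names sample_crypto_ops,
 * sample_mech_info_tab and sample_prov_handle are hypothetical; a real
 * provider supplies its own ops vector and mechanism table.
 *
 *	static crypto_kcf_provider_handle_t sample_prov_handle;
 *
 *	int
 *	_init(void)
 *	{
 *		crypto_provider_info_t pinfo;
 *		int ret;
 *
 *		if ((ret = mod_install(&modlinkage)) != 0)
 *			return (ret);
 *
 *		bzero(&pinfo, sizeof (pinfo));
 *		pinfo.pi_interface_version = CRYPTO_SPI_VERSION_1;
 *		pinfo.pi_provider_description = "sample software provider";
 *		pinfo.pi_provider_type = CRYPTO_SW_PROVIDER;
 *		pinfo.pi_provider_dev.pd_sw = &modlinkage;
 *		pinfo.pi_ops_vector = &sample_crypto_ops;
 *		pinfo.pi_mech_list_count =
 *		    sizeof (sample_mech_info_tab) / sizeof (crypto_mech_info_t);
 *		pinfo.pi_mechanisms = sample_mech_info_tab;
 *
 *		if (crypto_register_provider(&pinfo, &sample_prov_handle) !=
 *		    CRYPTO_SUCCESS)
 *			cmn_err(CE_WARN,
 *			    "sample: unable to register with KCF");
 *
 *		return (0);
 *	}
 */
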
357 /* Return the number of holds on a provider. */
358 int
359 kcf_get_refcnt(kcf_provider_desc_t *pd, boolean_t do_lock)
360 {
361 	int i;
362 	int refcnt = 0;
363 
364 	if (do_lock)
365 		for (i = 0; i < pd->pd_nbins; i++)
366 			mutex_enter(&(pd->pd_percpu_bins[i].kp_lock));
367 
368 	for (i = 0; i < pd->pd_nbins; i++)
369 		refcnt += pd->pd_percpu_bins[i].kp_holdcnt;
370 
371 	if (do_lock)
372 		for (i = 0; i < pd->pd_nbins; i++)
373 			mutex_exit(&(pd->pd_percpu_bins[i].kp_lock));
374 
375 	return (refcnt);
376 }
377 
378 /*
379  * This routine is used to notify the framework when a provider is being
380  * removed.  Hardware providers call this routine in their detach routines.
381  * Software providers call this routine in their _fini() routine.
382  */
383 int
384 crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
385 {
386 	uint_t mech_idx;
387 	kcf_provider_desc_t *desc;
388 	kcf_prov_state_t saved_state;
389 
390 	/* lookup provider descriptor */
391 	if ((desc = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
392 		return (CRYPTO_UNKNOWN_PROVIDER);
393 
394 	mutex_enter(&desc->pd_lock);
395 	/*
396 	 * Check if any other thread is disabling or removing
397 	 * this provider. We return if this is the case.
398 	 */
399 	if (desc->pd_state >= KCF_PROV_DISABLED) {
400 		mutex_exit(&desc->pd_lock);
401 		/* Release reference held by kcf_prov_tab_lookup(). */
402 		KCF_PROV_REFRELE(desc);
403 		return (CRYPTO_BUSY);
404 	}
405 
406 	saved_state = desc->pd_state;
407 	desc->pd_state = KCF_PROV_UNREGISTERING;
408 
409 	if (saved_state == KCF_PROV_BUSY) {
410 		/*
411 		 * The per-provider taskq threads may be waiting. We
412 		 * signal them so that they can start failing requests.
413 		 */
414 		cv_broadcast(&desc->pd_resume_cv);
415 	}
416 
417 	mutex_exit(&desc->pd_lock);
418 
419 	if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
420 		remove_provider(desc);
421 	}
422 
423 	if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
424 		/* remove the provider from the mechanisms tables */
425 		for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
426 		    mech_idx++) {
427 			kcf_remove_mech_provider(
428 			    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
429 		}
430 	}
431 
432 	/* remove provider from providers table */
433 	if (kcf_prov_tab_rem_provider((crypto_provider_id_t)handle) !=
434 	    CRYPTO_SUCCESS) {
435 		/* Release reference held by kcf_prov_tab_lookup(). */
436 		KCF_PROV_REFRELE(desc);
437 		return (CRYPTO_UNKNOWN_PROVIDER);
438 	}
439 
440 	delete_kstat(desc);
441 
442 	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
443 		/*
444 		 * Wait till the existing requests with the provider complete
445 		 * and all the holds are released. All the holds on a software
446 		 * provider are from kernel clients and the hold time
447 		 * is expected to be short. So, we won't be stuck here forever.
448 		 */
449 		while (kcf_get_refcnt(desc, B_TRUE) > 1) {
450 			/* wait 1 second and try again. */
451 			delay(1 * drv_usectohz(1000000));
452 		}
453 	} else {
454 		int i;
455 		kcf_prov_cpu_t *mp;
456 
457 		/*
458 		 * Wait until requests that have been sent to the provider
459 		 * complete.
460 		 */
461 		for (i = 0; i < desc->pd_nbins; i++) {
462 			mp = &(desc->pd_percpu_bins[i]);
463 
464 			mutex_enter(&mp->kp_lock);
465 			while (mp->kp_jobcnt > 0) {
466 				cv_wait(&mp->kp_cv, &mp->kp_lock);
467 			}
468 			mutex_exit(&mp->kp_lock);
469 		}
470 	}
471 
472 	mutex_enter(&desc->pd_lock);
473 	desc->pd_state = KCF_PROV_UNREGISTERED;
474 	mutex_exit(&desc->pd_lock);
475 
476 	kcf_do_notify(desc, B_FALSE);
477 
478 	mutex_enter(&prov_tab_mutex);
479 	/* Release reference held by kcf_prov_tab_lookup(). */
480 	KCF_PROV_REFRELE(desc);
481 
482 	if (kcf_get_refcnt(desc, B_TRUE) == 0) {
483 		/* kcf_free_provider_desc drops prov_tab_mutex */
484 		kcf_free_provider_desc(desc);
485 	} else {
486 		ASSERT(desc->pd_prov_type != CRYPTO_SW_PROVIDER);
487 		/*
488 		 * We could avoid this if /dev/crypto can proactively
489 		 * remove any holds on us from a dormant PKCS #11 app.
490 		 * For now, we check the provider table for
491 		 * KCF_PROV_UNREGISTERED entries when a provider is
492 		 * added to the table or when a provider is removed from it
493 		 * and free them when refcnt reaches zero.
494 		 */
495 		kcf_need_provtab_walk = B_TRUE;
496 		mutex_exit(&prov_tab_mutex);
497 	}
498 
499 	return (CRYPTO_SUCCESS);
500 }
501 
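/*
 * Illustrative sketch (not part of this file): the matching _fini() of the
 * hypothetical software provider above refuses to unload while the framework
 * still holds it, mapping a non-success return to EBUSY so modunload()
 * backs off and retries later.
 *
 *	int
 *	_fini(void)
 *	{
 *		if (crypto_unregister_provider(sample_prov_handle) !=
 *		    CRYPTO_SUCCESS)
 *			return (EBUSY);
 *
 *		return (mod_remove(&modlinkage));
 *	}
 */
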
502 /*
503  * This routine is used to notify the framework that the state of
504  * a cryptographic provider has changed. Valid state codes are:
505  *
506  * CRYPTO_PROVIDER_READY
507  * 	The provider indicates that it can process more requests. A provider
508  *	sends this notification if it has previously notified the framework
509  *	with CRYPTO_PROVIDER_BUSY.
510  *
511  * CRYPTO_PROVIDER_BUSY
512  * 	The provider cannot take more requests.
513  *
514  * CRYPTO_PROVIDER_FAILED
515  *	The provider encountered an internal error. The framework will not
516  * 	send any more requests to the provider. The provider may notify
517  *	with CRYPTO_PROVIDER_READY if it is able to recover from the error.
518  *
519  * This routine can be called from user or interrupt context.
520  */
521 void
522 crypto_provider_notification(crypto_kcf_provider_handle_t handle, uint_t state)
523 {
524 	kcf_provider_desc_t *pd;
525 
526 	/* lookup the provider from the given handle */
527 	if ((pd = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
528 		return;
529 
530 	mutex_enter(&pd->pd_lock);
531 
532 	if (pd->pd_state <= KCF_PROV_VERIFICATION_FAILED)
533 		goto out;
534 
535 	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
536 		cmn_err(CE_WARN, "crypto_provider_notification: "
537 		    "logical provider (%x) ignored\n", handle);
538 		goto out;
539 	}
540 	switch (state) {
541 	case CRYPTO_PROVIDER_READY:
542 		switch (pd->pd_state) {
543 		case KCF_PROV_BUSY:
544 			pd->pd_state = KCF_PROV_READY;
545 			/*
546 			 * Signal the per-provider taskq threads that they
547 			 * can start submitting requests.
548 			 */
549 			cv_broadcast(&pd->pd_resume_cv);
550 			break;
551 
552 		case KCF_PROV_FAILED:
553 			/*
554 			 * The provider recovered from the error. Let us
555 			 * use it now.
556 			 */
557 			pd->pd_state = KCF_PROV_READY;
558 			break;
559 		}
560 		break;
561 
562 	case CRYPTO_PROVIDER_BUSY:
563 		switch (pd->pd_state) {
564 		case KCF_PROV_READY:
565 			pd->pd_state = KCF_PROV_BUSY;
566 			break;
567 		}
568 		break;
569 
570 	case CRYPTO_PROVIDER_FAILED:
571 		/*
572 		 * We note the failure and return. The per-provider taskq
573 		 * threads check this flag and start failing the
574 		 * requests, if it is set. See process_req_hwp() for details.
575 		 */
576 		switch (pd->pd_state) {
577 		case KCF_PROV_READY:
578 			pd->pd_state = KCF_PROV_FAILED;
579 			break;
580 
581 		case KCF_PROV_BUSY:
582 			pd->pd_state = KCF_PROV_FAILED;
583 			/*
584 			 * The per-provider taskq threads may be waiting. We
585 			 * signal them so that they can start failing requests.
586 			 */
587 			cv_broadcast(&pd->pd_resume_cv);
588 			break;
589 		}
590 		break;
591 	}
592 out:
593 	mutex_exit(&pd->pd_lock);
594 	KCF_PROV_REFRELE(pd);
595 }
596 
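/*
 * Illustrative sketch (not part of this file): a hardware provider whose
 * command ring fills up can throttle the framework and later resume. The
 * handle sample_prov_handle and the ring-state helper are hypothetical.
 *
 *	if (ring_slots_free(sc) == 0)
 *		crypto_provider_notification(sample_prov_handle,
 *		    CRYPTO_PROVIDER_BUSY);
 *
 * ...and later, once completions drain the ring:
 *
 *	crypto_provider_notification(sample_prov_handle,
 *	    CRYPTO_PROVIDER_READY);
 */
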
597 /*
598  * This routine is used to notify the framework of the result of
599  * an asynchronous request handled by a provider. Valid error
600  * codes are the same as the CRYPTO_* errors defined in common.h.
601  *
602  * This routine can be called from user or interrupt context.
603  */
604 void
605 crypto_op_notification(crypto_req_handle_t handle, int error)
606 {
607 	kcf_call_type_t ctype;
608 
609 	if (handle == NULL)
610 		return;
611 
612 	if ((ctype = GET_REQ_TYPE(handle)) == CRYPTO_SYNCH) {
613 		kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)handle;
614 
615 		KCF_PROV_JOB_RELE_STAT(sreq->sn_mp, (error != CRYPTO_SUCCESS));
616 		kcf_sop_done(sreq, error);
617 	} else {
618 		kcf_areq_node_t *areq = (kcf_areq_node_t *)handle;
619 
620 		ASSERT(ctype == CRYPTO_ASYNCH);
621 		KCF_PROV_JOB_RELE_STAT(areq->an_mp, (error != CRYPTO_SUCCESS));
622 		kcf_aop_done(areq, error);
623 	}
624 }
625 
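/*
 * Illustrative sketch (not part of this file): a hardware provider's
 * interrupt handler completes an asynchronous request by passing back the
 * crypto_req_handle_t it saved when the job was submitted. sample_intr(),
 * sample_next_done() and the descriptor fields are hypothetical.
 *
 *	static uint_t
 *	sample_intr(caddr_t arg)
 *	{
 *		sample_desc_t *dp = sample_next_done((sample_state_t *)arg);
 *
 *		if (dp == NULL)
 *			return (DDI_INTR_UNCLAIMED);
 *
 *		crypto_op_notification(dp->sd_req,
 *		    dp->sd_hw_error == 0 ? CRYPTO_SUCCESS : CRYPTO_FAILED);
 *		return (DDI_INTR_CLAIMED);
 *	}
 */
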
626 /*
627  * This routine is used by software providers to determine
628  * whether to use KM_SLEEP or KM_NOSLEEP during memory allocation.
629  * Note that hardware providers can always use KM_SLEEP. So,
630  * they do not need to call this routine.
631  *
632  * This routine can be called from user or interrupt context.
633  */
634 int
635 crypto_kmflag(crypto_req_handle_t handle)
636 {
637 	return (REQHNDL2_KMFLAG(handle));
638 }
639 
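/*
 * Illustrative sketch (not part of this file): a software provider entry
 * point uses crypto_kmflag() so allocations sleep only when the caller can
 * tolerate it. The entry point name and SAMPLE_SCRATCH_LEN are hypothetical.
 *
 *	static int
 *	sample_digest(crypto_ctx_t *ctx, crypto_data_t *data,
 *	    crypto_data_t *digest, crypto_req_handle_t req)
 *	{
 *		void *scratch;
 *
 *		scratch = kmem_alloc(SAMPLE_SCRATCH_LEN, crypto_kmflag(req));
 *		if (scratch == NULL)
 *			return (CRYPTO_HOST_MEMORY);
 *		...
 *	}
 */
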
640 /*
641  * Process the mechanism info structures specified by the provider
642  * during registration. A NULL crypto_provider_info_t indicates
643  * an already initialized provider descriptor.
644  *
645  * Mechanisms are not added to the kernel's mechanism table if the
646  * provider is a logical provider.
647  *
648  * Returns CRYPTO_SUCCESS on success, CRYPTO_ARGUMENTS_BAD if one
649  * of the specified mechanisms was malformed, or CRYPTO_HOST_MEMORY
650  * if the table of mechanisms is full.
651  */
652 static int
653 init_prov_mechs(crypto_provider_info_t *info, kcf_provider_desc_t *desc)
654 {
655 	uint_t mech_idx;
656 	uint_t cleanup_idx;
657 	int err = CRYPTO_SUCCESS;
658 	kcf_prov_mech_desc_t *pmd;
659 	int desc_use_count = 0;
660 	int mcount = desc->pd_mech_list_count;
661 
662 	if (desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
663 		if (info != NULL) {
664 			ASSERT(info->pi_mechanisms != NULL);
665 			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
666 			    sizeof (crypto_mech_info_t) * mcount);
667 		}
668 		return (CRYPTO_SUCCESS);
669 	}
670 
671 	/*
672 	 * Copy the mechanism list from the provider info to the provider
673 	 * descriptor. desc->pd_mechanisms has an extra crypto_mech_info_t
674 	 * element if the provider has random_ops since we keep an internal
675 	 * mechanism, SUN_RANDOM, in this case.
676 	 */
677 	if (info != NULL) {
678 		if (info->pi_ops_vector->co_random_ops != NULL) {
679 			crypto_mech_info_t *rand_mi;
680 
681 			/*
682 			 * Need the following check as it is possible to have
683 			 * a provider that implements just random_ops and has
684 			 * pi_mechanisms == NULL.
685 			 */
686 			if (info->pi_mechanisms != NULL) {
687 				bcopy(info->pi_mechanisms, desc->pd_mechanisms,
688 				    sizeof (crypto_mech_info_t) * (mcount - 1));
689 			}
690 			rand_mi = &desc->pd_mechanisms[mcount - 1];
691 
692 			bzero(rand_mi, sizeof (crypto_mech_info_t));
693 			(void) strncpy(rand_mi->cm_mech_name, SUN_RANDOM,
694 			    CRYPTO_MAX_MECH_NAME);
695 			rand_mi->cm_func_group_mask = CRYPTO_FG_RANDOM;
696 		} else {
697 			ASSERT(info->pi_mechanisms != NULL);
698 			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
699 			    sizeof (crypto_mech_info_t) * mcount);
700 		}
701 	}
702 
703 	/*
704 	 * For each mechanism supported by the provider, add the provider
705 	 * to the corresponding KCF mechanism mech_entry chain.
706 	 */
707 	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count; mech_idx++) {
708 		crypto_mech_info_t *mi = &desc->pd_mechanisms[mech_idx];
709 
710 		if ((mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BITS) &&
711 		    (mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)) {
712 			err = CRYPTO_ARGUMENTS_BAD;
713 			break;
714 		}
715 
716 		if (desc->pd_flags & CRYPTO_HASH_NO_UPDATE &&
717 		    mi->cm_func_group_mask & CRYPTO_FG_DIGEST) {
718 			/*
719 			 * We ask the provider to specify the limit
720 			 * per hash mechanism. But, in practice, a
721 			 * hardware limitation means all hash mechanisms
722 			 * will have the same maximum size allowed for
723 			 * input data. So, we make it a per provider
724 			 * limit to keep it simple.
725 			 */
726 			if (mi->cm_max_input_length == 0) {
727 				err = CRYPTO_ARGUMENTS_BAD;
728 				break;
729 			} else {
730 				desc->pd_hash_limit = mi->cm_max_input_length;
731 			}
732 		}
733 
734 		if ((err = kcf_add_mech_provider(mech_idx, desc, &pmd)) !=
735 		    KCF_SUCCESS)
736 			break;
737 
738 		if (pmd == NULL)
739 			continue;
740 
741 		/* The provider will be used for this mechanism */
742 		desc_use_count++;
743 	}
744 
745 	/*
746 	 * Don't allow multiple software providers with disabled mechanisms
747 	 * to register. Subsequent enabling of mechanisms will result in
748 	 * an unsupported configuration, i.e. multiple software providers
749 	 * per mechanism.
750 	 */
751 	if (desc_use_count == 0 && desc->pd_prov_type == CRYPTO_SW_PROVIDER)
752 		return (CRYPTO_ARGUMENTS_BAD);
753 
754 	if (err == KCF_SUCCESS)
755 		return (CRYPTO_SUCCESS);
756 
757 	/*
758 	 * An error occurred while adding the mechanism, cleanup
759 	 * and bail.
760 	 */
761 	for (cleanup_idx = 0; cleanup_idx < mech_idx; cleanup_idx++) {
762 		kcf_remove_mech_provider(
763 		    desc->pd_mechanisms[cleanup_idx].cm_mech_name, desc);
764 	}
765 
766 	if (err == KCF_MECH_TAB_FULL)
767 		return (CRYPTO_HOST_MEMORY);
768 
769 	return (CRYPTO_ARGUMENTS_BAD);
770 }
771 
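/*
 * Illustrative sketch (not part of this file): a provider mechanism table
 * entry as init_prov_mechs() expects to find it. SAMPLE_SHA1_MECH and the
 * key-length fields are hypothetical, and the field order shown (name,
 * number, function-group mask, min/max key length, flags) is an assumption
 * based on the layout used by the bundled software providers; check spi.h.
 * Note that CRYPTO_KEYSIZE_UNIT_IN_BITS and CRYPTO_KEYSIZE_UNIT_IN_BYTES
 * must not both be set, or registration fails with CRYPTO_ARGUMENTS_BAD.
 *
 *	static crypto_mech_info_t sample_mech_info_tab[] = {
 *		{SUN_CKM_SHA1, SAMPLE_SHA1_MECH,
 *		    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
 *		    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
 *	};
 */
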
772 /*
773  * Update routine for kstat. Only privileged users are allowed to
774  * access this information, since this information is sensitive.
775  * There are some cryptographic attacks (e.g. traffic analysis)
776  * which can use this information.
777  */
778 static int
779 kcf_prov_kstat_update(kstat_t *ksp, int rw)
780 {
781 	kcf_prov_stats_t *ks_data;
782 	kcf_provider_desc_t *pd = (kcf_provider_desc_t *)ksp->ks_private;
783 	int i;
784 
785 	if (rw == KSTAT_WRITE)
786 		return (EACCES);
787 
788 	ks_data = ksp->ks_data;
789 
790 	if (secpolicy_sys_config(CRED(), B_TRUE) != 0) {
791 		ks_data->ps_ops_total.value.ui64 = 0;
792 		ks_data->ps_ops_passed.value.ui64 = 0;
793 		ks_data->ps_ops_failed.value.ui64 = 0;
794 		ks_data->ps_ops_busy_rval.value.ui64 = 0;
795 	} else {
796 		uint64_t dtotal, ftotal, btotal;
797 
798 		dtotal = ftotal = btotal = 0;
799 		/* No locking done since an exact count is not required. */
800 		for (i = 0; i < pd->pd_nbins; i++) {
801 			dtotal += pd->pd_percpu_bins[i].kp_ndispatches;
802 			ftotal += pd->pd_percpu_bins[i].kp_nfails;
803 			btotal += pd->pd_percpu_bins[i].kp_nbusy_rval;
804 		}
805 
806 		ks_data->ps_ops_total.value.ui64 = dtotal;
807 		ks_data->ps_ops_failed.value.ui64 = ftotal;
808 		ks_data->ps_ops_busy_rval.value.ui64 = btotal;
809 		ks_data->ps_ops_passed.value.ui64 = dtotal - ftotal - btotal;
810 	}
811 
812 	return (0);
813 }
814 
815 
816 /*
817  * Utility routine called from failure paths in crypto_register_provider()
818  * and from crypto_load_soft_disabled().
819  */
820 void
821 undo_register_provider(kcf_provider_desc_t *desc, boolean_t remove_prov)
822 {
823 	uint_t mech_idx;
824 
825 	/* remove the provider from the mechanisms tables */
826 	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
827 	    mech_idx++) {
828 		kcf_remove_mech_provider(
829 		    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
830 	}
831 
832 	/* remove provider from providers table */
833 	if (remove_prov)
834 		(void) kcf_prov_tab_rem_provider(desc->pd_prov_id);
835 }
836 
837 static void
838 undo_register_provider_extra(kcf_provider_desc_t *desc)
839 {
840 	delete_kstat(desc);
841 	undo_register_provider(desc, B_TRUE);
842 }
843 
844 /*
845  * Utility routine called from crypto_load_soft_disabled(). Callers
846  * should have done a prior undo_register_provider().
847  */
848 void
849 redo_register_provider(kcf_provider_desc_t *pd)
850 {
851 	/* process the mechanisms supported by the provider */
852 	(void) init_prov_mechs(NULL, pd);
853 
854 	/*
855 	 * Hold provider in providers table. We should not call
856 	 * kcf_prov_tab_add_provider() here as the provider descriptor
857 	 * is still valid which means it has an entry in the provider
858 	 * table.
859 	 */
860 	KCF_PROV_REFHOLD(pd);
861 }
862 
863 /*
864  * Add provider (p1) to another provider's (p2) list of providers.
865  * Hardware and logical providers use this list to cross-reference
866  * each other.
867  */
868 static void
869 add_provider_to_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
870 {
871 	kcf_provider_list_t *new;
872 
873 	new = kmem_alloc(sizeof (kcf_provider_list_t), KM_SLEEP);
874 	mutex_enter(&p2->pd_lock);
875 	new->pl_next = p2->pd_provider_list;
876 	p2->pd_provider_list = new;
877 	new->pl_provider = p1;
878 	mutex_exit(&p2->pd_lock);
879 }
880 
881 /*
882  * Remove provider (p1) from another provider's (p2) list of providers.
883  * Hardware and logical providers use this list to cross-reference
884  * each other.
885  */
886 static void
887 remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
888 {
889 
890 	kcf_provider_list_t *pl = NULL, **prev;
891 
892 	mutex_enter(&p2->pd_lock);
893 	for (pl = p2->pd_provider_list, prev = &p2->pd_provider_list;
894 	    pl != NULL; prev = &pl->pl_next, pl = pl->pl_next) {
895 		if (pl->pl_provider == p1) {
896 			break;
897 		}
898 	}
899 
900 	if (pl == NULL) {
901 		mutex_exit(&p2->pd_lock);
902 		return;
903 	}
904 
905 	/* detach and free kcf_provider_list structure */
906 	*prev = pl->pl_next;
907 	kmem_free(pl, sizeof (*pl));
908 	mutex_exit(&p2->pd_lock);
909 }
910 
911 /*
912  * Walk the array of logical provider handles passed in the
913  * crypto_provider_info structure and cross-link the registering hardware
914  * provider with each logical provider (kcf_provider_desc_t) it belongs to.
915  */
916 static void
917 process_logical_providers(crypto_provider_info_t *info, kcf_provider_desc_t *hp)
918 {
919 	kcf_provider_desc_t *lp;
920 	crypto_provider_id_t handle;
921 	int count = info->pi_logical_provider_count;
922 	int i;
923 
924 	/* add hardware provider to each logical provider */
925 	for (i = 0; i < count; i++) {
926 		handle = info->pi_logical_providers[i];
927 		lp = kcf_prov_tab_lookup((crypto_provider_id_t)handle);
928 		if (lp == NULL) {
929 			continue;
930 		}
931 		add_provider_to_array(hp, lp);
932 		hp->pd_flags |= KCF_LPROV_MEMBER;
933 
934 		/*
935 		 * A hardware provider has to have the provider descriptor of
936 		 * every logical provider it belongs to, so it can be removed
937 		 * from the logical provider if the hardware provider
938 		 * unregisters from the framework.
939 		 */
940 		add_provider_to_array(lp, hp);
941 		KCF_PROV_REFRELE(lp);
942 	}
943 }
944 
945 /*
946  * This routine removes a provider from all of the logical or
947  * hardware providers it belongs to, and frees the provider's
948  * list of providers.
949  */
950 static void
951 remove_provider(kcf_provider_desc_t *pp)
952 {
953 	kcf_provider_desc_t *p;
954 	kcf_provider_list_t *e, *next;
955 
956 	mutex_enter(&pp->pd_lock);
957 	for (e = pp->pd_provider_list; e != NULL; e = next) {
958 		p = e->pl_provider;
959 		remove_provider_from_array(pp, p);
960 		if (p->pd_prov_type == CRYPTO_HW_PROVIDER &&
961 		    p->pd_provider_list == NULL)
962 			p->pd_flags &= ~KCF_LPROV_MEMBER;
963 		next = e->pl_next;
964 		kmem_free(e, sizeof (*e));
965 	}
966 	pp->pd_provider_list = NULL;
967 	mutex_exit(&pp->pd_lock);
968 }
969 
970 /*
971  * Dispatch events as needed for a provider. is_added flag tells
972  * whether the provider is registering or unregistering.
973  */
974 void
975 kcf_do_notify(kcf_provider_desc_t *prov_desc, boolean_t is_added)
976 {
977 	int i;
978 	crypto_notify_event_change_t ec;
979 
980 	ASSERT(prov_desc->pd_state > KCF_PROV_VERIFICATION_FAILED);
981 
982 	/*
983 	 * Inform interested clients of the mechanisms becoming
984 	 * available/unavailable. We skip this for logical providers
985 	 * as they do not affect mechanisms.
986 	 */
987 	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
988 		ec.ec_provider_type = prov_desc->pd_prov_type;
989 		ec.ec_change = is_added ? CRYPTO_MECH_ADDED :
990 		    CRYPTO_MECH_REMOVED;
991 		for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
992 			/* Skip any mechanisms not allowed by the policy */
993 			if (is_mech_disabled(prov_desc,
994 			    prov_desc->pd_mechanisms[i].cm_mech_name))
995 				continue;
996 
997 			(void) strncpy(ec.ec_mech_name,
998 			    prov_desc->pd_mechanisms[i].cm_mech_name,
999 			    CRYPTO_MAX_MECH_NAME);
1000 			kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED, &ec);
1001 		}
1002 
1003 	}
1004 
1005 	/*
1006 	 * Inform interested clients about the new or departing provider.
1007 	 * In the case of a logical provider, we send the event only
1008 	 * for the logical provider and not for the underlying
1009 	 * providers, which are identified by the KCF_LPROV_MEMBER flag.
1010 	 */
1011 	if (prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER ||
1012 	    (prov_desc->pd_flags & KCF_LPROV_MEMBER) == 0) {
1013 		kcf_walk_ntfylist(is_added ? CRYPTO_EVENT_PROVIDER_REGISTERED :
1014 		    CRYPTO_EVENT_PROVIDER_UNREGISTERED, prov_desc);
1015 	}
1016 }
1017 
1018 static void
1019 delete_kstat(kcf_provider_desc_t *desc)
1020 {
1021 	/* destroy the kstat created for this provider */
1022 	if (desc->pd_kstat != NULL) {
1023 		kcf_provider_desc_t *kspd = desc->pd_kstat->ks_private;
1024 
1025 		/* release reference held by desc->pd_kstat->ks_private */
1026 		ASSERT(desc == kspd);
1027 		kstat_delete(kspd->pd_kstat);
1028 		desc->pd_kstat = NULL;
1029 		KCF_PROV_REFRELE(kspd);
1030 	}
1031 }
1032