/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file is part of the core Kernel Cryptographic Framework.
 * It implements the SPI functions exported to cryptographic
 * providers.
 */


#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/spi.h>

/*
 * Thread count and minalloc/maxalloc values to be used for taskq_create().
 */
int crypto_taskq_threads = CRYPTO_TASKQ_THREADS;
int crypto_taskq_minalloc = CRYPTO_TASKQ_MIN;
int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX;

static void remove_provider(kcf_provider_desc_t *);
static void process_logical_providers(crypto_provider_info_t *,
    kcf_provider_desc_t *);
static int init_prov_mechs(crypto_provider_info_t *, kcf_provider_desc_t *);
static int kcf_prov_kstat_update(kstat_t *, int);
static void delete_kstat(kcf_provider_desc_t *);

static kcf_prov_stats_t kcf_stats_ks_data_template = {
	{ "kcf_ops_total",		KSTAT_DATA_UINT64 },
	{ "kcf_ops_passed",		KSTAT_DATA_UINT64 },
	{ "kcf_ops_failed",		KSTAT_DATA_UINT64 },
	{ "kcf_ops_returned_busy",	KSTAT_DATA_UINT64 }
};

#define	KCF_SPI_COPY_OPS(src, dst, ops) if ((src)->ops != NULL) \
	*((dst)->ops) = *((src)->ops);

/*
 * Copy an ops vector from src to dst. Used during provider registration
 * to copy the ops vector from the provider info structure to the
 * provider descriptor maintained by KCF.
 * Copying the ops vector specified by the provider is needed since the
 * framework does not require the provider info structure to be
 * persistent.
 */
static void
copy_ops_vector_v1(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_control_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_cipher_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mac_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_sign_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_verify_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_cipher_mac_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_random_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_session_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_object_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_key_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_provider_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_ctx_ops);
}

static void
copy_ops_vector_v2(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops);
}

static void
copy_ops_vector_v3(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_nostore_key_ops);
}

/*
 * This routine is used to add cryptographic providers to the KCF framework.
 * Providers pass a crypto_provider_info structure to crypto_register_provider()
 * and get back a handle.  The crypto_provider_info structure contains a
 * list of mechanisms supported by the provider and an ops vector containing
 * provider entry points.  Hardware providers call this routine in their attach
 * routines.  Software providers call this routine in their _init() routine.
 */
int
crypto_register_provider(crypto_provider_info_t *info,
    crypto_kcf_provider_handle_t *handle)
{
	char *ks_name;

	kcf_provider_desc_t *prov_desc = NULL;
	int ret = CRYPTO_ARGUMENTS_BAD;

	if (info->pi_interface_version > CRYPTO_SPI_VERSION_3)
		return (CRYPTO_VERSION_MISMATCH);

	/*
	 * Check provider type, must be software, hardware, or logical.
	 */
	if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
	    info->pi_provider_type != CRYPTO_SW_PROVIDER &&
	    info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
		return (CRYPTO_ARGUMENTS_BAD);

	/*
	 * Allocate and initialize a new provider descriptor. We also
	 * hold it and release it when done.
	 */
	prov_desc = kcf_alloc_provider_desc(info);
	KCF_PROV_REFHOLD(prov_desc);

	prov_desc->pd_prov_type = info->pi_provider_type;

	/* provider-private handle, opaque to KCF */
	prov_desc->pd_prov_handle = info->pi_provider_handle;

	/* copy provider description string */
	if (info->pi_provider_description != NULL) {
		/*
		 * pi_provider_description is a string that can contain
		 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
		 * INCLUDING the terminating null character. A bcopy()
		 * is necessary here as pd_description should not have
		 * a null character. See the comments in
		 * kcf_alloc_provider_desc() for details on the
		 * pd_description field.
		 */
		bcopy(info->pi_provider_description, prov_desc->pd_description,
		    MIN(strlen(info->pi_provider_description),
		    (size_t)CRYPTO_PROVIDER_DESCR_MAX_LEN));
	}

	if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
		if (info->pi_ops_vector == NULL) {
			goto bail;
		}
		copy_ops_vector_v1(info->pi_ops_vector,
		    prov_desc->pd_ops_vector);
		if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
			copy_ops_vector_v2(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
			prov_desc->pd_flags = info->pi_flags;
		}
		if (info->pi_interface_version == CRYPTO_SPI_VERSION_3) {
			copy_ops_vector_v3(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
		}
	}

	/* object_ops and nostore_key_ops are mutually exclusive */
	if (prov_desc->pd_ops_vector->co_object_ops &&
	    prov_desc->pd_ops_vector->co_nostore_key_ops) {
		goto bail;
	}

	/* process the mechanisms supported by the provider */
	if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
		goto bail;

	/*
	 * Add the provider to the providers table. This also sets the
	 * pd_prov_id field of the descriptor.
	 */
	if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
		undo_register_provider(prov_desc, B_FALSE);
		goto bail;
	}

	/*
	 * We create a taskq only for a hardware provider. The global
	 * software queue is used for software providers. We handle ordering
	 * of multi-part requests in the taskq routine. So, it is safe to
	 * have multiple threads for the taskq. We pass TASKQ_PREPOPULATE flag
	 * to keep some entries cached to improve performance.
	 */
	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
		prov_desc->pd_sched_info.ks_taskq = taskq_create("kcf_taskq",
		    crypto_taskq_threads, minclsyspri,
		    crypto_taskq_minalloc, crypto_taskq_maxalloc,
		    TASKQ_PREPOPULATE);
	else
		prov_desc->pd_sched_info.ks_taskq = NULL;

	/* no kernel session for logical providers */
	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Open a session for session-oriented providers. This session
		 * is used for all kernel consumers. This is fine as a provider
		 * is required to support multiple thread access to a session.
		 * We can do this only after the taskq has been created as we
		 * do a kcf_submit_request() to open the session.
		 */
		if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
			kcf_req_params_t params;

			KCF_WRAP_SESSION_OPS_PARAMS(&params,
			    KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
			    CRYPTO_USER, NULL, 0, prov_desc);
			ret = kcf_submit_request(prov_desc, NULL, NULL, &params,
			    B_FALSE);

			if (ret != CRYPTO_SUCCESS) {
				undo_register_provider(prov_desc, B_TRUE);
				ret = CRYPTO_FAILED;
				goto bail;
			}
		}
	}

	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Create the kstat for this provider. A kstat is
		 * installed for each successfully registered provider
		 * and deleted when the provider unregisters.
		 */
		if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
			ks_name = kmem_asprintf("%s_%s",
			    "NONAME", "provider_stats");
		} else {
			ks_name = kmem_asprintf("%s_%d_%u_%s",
			    "NONAME", 0, prov_desc->pd_prov_id,
			    "provider_stats");
		}

		prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
		    KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
		    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

		if (prov_desc->pd_kstat != NULL) {
			bcopy(&kcf_stats_ks_data_template,
			    &prov_desc->pd_ks_data,
			    sizeof (kcf_stats_ks_data_template));
			prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
			KCF_PROV_REFHOLD(prov_desc);
			KCF_PROV_IREFHOLD(prov_desc);
			prov_desc->pd_kstat->ks_private = prov_desc;
			prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
			kstat_install(prov_desc->pd_kstat);
		}
		kmem_strfree(ks_name);
	}

	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
		process_logical_providers(info, prov_desc);

	mutex_enter(&prov_desc->pd_lock);
	prov_desc->pd_state = KCF_PROV_READY;
	mutex_exit(&prov_desc->pd_lock);
	kcf_do_notify(prov_desc, B_TRUE);

	*handle = prov_desc->pd_kcf_prov_handle;
	ret = CRYPTO_SUCCESS;

bail:
	KCF_PROV_REFRELE(prov_desc);
	return (ret);
}

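/*
 * Illustrative sketch (not part of the framework): roughly how a software
 * provider might register itself from its _init() routine. The names
 * example_prov_info, example_ops, example_mech_info and example_handle are
 * hypothetical placeholders and the field selection is abbreviated; only
 * crypto_register_provider() itself is defined in this file.
 *
 *	static crypto_kcf_provider_handle_t example_handle;
 *
 *	static crypto_provider_info_t example_prov_info = {
 *		.pi_interface_version = CRYPTO_SPI_VERSION_3,
 *		.pi_provider_description = "example software provider",
 *		.pi_provider_type = CRYPTO_SW_PROVIDER,
 *		.pi_ops_vector = &example_ops,
 *		.pi_mechanisms = example_mech_info,
 *	};
 *
 *	int
 *	_init(void)
 *	{
 *		if (crypto_register_provider(&example_prov_info,
 *		    &example_handle) != CRYPTO_SUCCESS)
 *			return (EACCES);
 *		return (0);
 *	}
 */
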
/*
 * This routine is used to notify the framework when a provider is being
 * removed.  Hardware providers call this routine in their detach routines.
 * Software providers call this routine in their _fini() routine.
 */
int
crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
{
	uint_t mech_idx;
	kcf_provider_desc_t *desc;
	kcf_prov_state_t saved_state;

	/* lookup provider descriptor */
	if ((desc = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
		return (CRYPTO_UNKNOWN_PROVIDER);

	mutex_enter(&desc->pd_lock);
	/*
	 * Check if any other thread is disabling or removing
	 * this provider. We return if this is the case.
	 */
	if (desc->pd_state >= KCF_PROV_DISABLED) {
		mutex_exit(&desc->pd_lock);
		/* Release reference held by kcf_prov_tab_lookup(). */
		KCF_PROV_REFRELE(desc);
		return (CRYPTO_BUSY);
	}

	saved_state = desc->pd_state;
	desc->pd_state = KCF_PROV_REMOVED;

	if (saved_state == KCF_PROV_BUSY) {
		/*
		 * The per-provider taskq threads may be waiting. We
		 * signal them so that they can start failing requests.
		 */
		cv_broadcast(&desc->pd_resume_cv);
	}

	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
		/*
		 * Check if this provider is currently being used.
		 * pd_irefcnt is the number of holds from the internal
		 * structures. We add one to account for the above lookup.
		 */
		if (desc->pd_refcnt > desc->pd_irefcnt + 1) {
			desc->pd_state = saved_state;
			mutex_exit(&desc->pd_lock);
			/* Release reference held by kcf_prov_tab_lookup(). */
			KCF_PROV_REFRELE(desc);
			/*
			 * On getting the busy return value, the administrator
			 * will presumably stop the clients, which removes the
			 * holds; a subsequent retry will then succeed.
			 */
			return (CRYPTO_BUSY);
		}
	}
	mutex_exit(&desc->pd_lock);

	if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
		remove_provider(desc);
	}

	if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/* remove the provider from the mechanisms tables */
		for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
		    mech_idx++) {
			kcf_remove_mech_provider(
			    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
		}
	}

	/* remove provider from providers table */
	if (kcf_prov_tab_rem_provider((crypto_provider_id_t)handle) !=
	    CRYPTO_SUCCESS) {
		/* Release reference held by kcf_prov_tab_lookup(). */
		KCF_PROV_REFRELE(desc);
		return (CRYPTO_UNKNOWN_PROVIDER);
	}

	delete_kstat(desc);

	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
		/* Release reference held by kcf_prov_tab_lookup(). */
		KCF_PROV_REFRELE(desc);

		/*
		 * Wait till the existing requests complete.
		 */
		mutex_enter(&desc->pd_lock);
		while (desc->pd_state != KCF_PROV_FREED)
			cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
		mutex_exit(&desc->pd_lock);
	} else {
		/*
		 * Wait until requests that have been sent to the provider
		 * complete.
		 */
		mutex_enter(&desc->pd_lock);
		while (desc->pd_irefcnt > 0)
			cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
		mutex_exit(&desc->pd_lock);
	}

	kcf_do_notify(desc, B_FALSE);

	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
		/*
		 * This is the only place where kcf_free_provider_desc()
		 * is called directly. KCF_PROV_REFRELE() should free the
		 * structure in all other places.
		 */
		ASSERT(desc->pd_state == KCF_PROV_FREED &&
		    desc->pd_refcnt == 0);
		kcf_free_provider_desc(desc);
	} else {
		KCF_PROV_REFRELE(desc);
	}

	return (CRYPTO_SUCCESS);
}

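/*
 * Illustrative sketch (hypothetical handle name, continuing the example
 * above): a software provider would typically unregister from its _fini()
 * routine and refuse to unload while the framework reports it busy.
 *
 *	int
 *	_fini(void)
 *	{
 *		if (crypto_unregister_provider(example_handle) !=
 *		    CRYPTO_SUCCESS)
 *			return (EBUSY);
 *		return (0);
 *	}
 */
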
/*
 * This routine is used to notify the framework that the state of
 * a cryptographic provider has changed. Valid state codes are:
 *
 * CRYPTO_PROVIDER_READY
 *	The provider can process more requests. A provider sends this
 *	notification after it has previously notified the framework with
 *	CRYPTO_PROVIDER_BUSY.
 *
 * CRYPTO_PROVIDER_BUSY
 *	The provider cannot accept more requests.
 *
 * CRYPTO_PROVIDER_FAILED
 *	The provider encountered an internal error. The framework will not
 *	send any more requests to the provider. The provider may notify
 *	with CRYPTO_PROVIDER_READY if it is able to recover from the error.
 *
 * This routine can be called from user or interrupt context.
 */
void
crypto_provider_notification(crypto_kcf_provider_handle_t handle, uint_t state)
{
	kcf_provider_desc_t *pd;

	/* lookup the provider from the given handle */
	if ((pd = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
		return;

	mutex_enter(&pd->pd_lock);

	if (pd->pd_state <= KCF_PROV_VERIFICATION_FAILED)
		goto out;

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		cmn_err(CE_WARN, "crypto_provider_notification: "
		    "logical provider (%x) ignored\n", handle);
		goto out;
	}
	switch (state) {
	case CRYPTO_PROVIDER_READY:
		switch (pd->pd_state) {
		case KCF_PROV_BUSY:
			pd->pd_state = KCF_PROV_READY;
			/*
			 * Signal the per-provider taskq threads that they
			 * can start submitting requests.
			 */
			cv_broadcast(&pd->pd_resume_cv);
			break;

		case KCF_PROV_FAILED:
			/*
			 * The provider recovered from the error. Let us
			 * use it now.
			 */
			pd->pd_state = KCF_PROV_READY;
			break;
		default:
			break;
		}
		break;

	case CRYPTO_PROVIDER_BUSY:
		switch (pd->pd_state) {
		case KCF_PROV_READY:
			pd->pd_state = KCF_PROV_BUSY;
			break;
		default:
			break;
		}
		break;

	case CRYPTO_PROVIDER_FAILED:
		/*
		 * We note the failure and return. The per-provider taskq
		 * threads check this flag and start failing the
		 * requests, if it is set. See process_req_hwp() for details.
		 */
		switch (pd->pd_state) {
		case KCF_PROV_READY:
			pd->pd_state = KCF_PROV_FAILED;
			break;

		case KCF_PROV_BUSY:
			pd->pd_state = KCF_PROV_FAILED;
			/*
			 * The per-provider taskq threads may be waiting. We
			 * signal them so that they can start failing requests.
			 */
			cv_broadcast(&pd->pd_resume_cv);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
out:
	mutex_exit(&pd->pd_lock);
	KCF_PROV_REFRELE(pd);
}

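/*
 * Illustrative sketch (hypothetical softc and helper names): a hardware
 * provider whose command queue fills up could throttle the framework with
 * CRYPTO_PROVIDER_BUSY and resume with CRYPTO_PROVIDER_READY once it has
 * drained, e.g. from its completion interrupt:
 *
 *	if (example_queue_full(softc))
 *		crypto_provider_notification(softc->sc_prov_handle,
 *		    CRYPTO_PROVIDER_BUSY);
 *	else if (softc->sc_flow_controlled && example_queue_low(softc))
 *		crypto_provider_notification(softc->sc_prov_handle,
 *		    CRYPTO_PROVIDER_READY);
 */
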
/*
 * This routine is used to notify the framework of the result of
 * an asynchronous request handled by a provider. Valid error
 * codes are the same as the CRYPTO_* errors defined in common.h.
 *
 * This routine can be called from user or interrupt context.
 */
void
crypto_op_notification(crypto_req_handle_t handle, int error)
{
	kcf_call_type_t ctype;

	if (handle == NULL)
		return;

	if ((ctype = GET_REQ_TYPE(handle)) == CRYPTO_SYNCH) {
		kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)handle;

		if (error != CRYPTO_SUCCESS)
			sreq->sn_provider->pd_sched_info.ks_nfails++;
		KCF_PROV_IREFRELE(sreq->sn_provider);
		kcf_sop_done(sreq, error);
	} else {
		kcf_areq_node_t *areq = (kcf_areq_node_t *)handle;

		ASSERT(ctype == CRYPTO_ASYNCH);
		if (error != CRYPTO_SUCCESS)
			areq->an_provider->pd_sched_info.ks_nfails++;
		KCF_PROV_IREFRELE(areq->an_provider);
		kcf_aop_done(areq, error);
	}
}

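/*
 * Illustrative sketch (hypothetical command structure): a hardware provider
 * typically stores the crypto_req_handle_t it was given with each queued
 * command and completes the request from its interrupt handler:
 *
 *	crypto_op_notification(cmd->cmd_req_handle,
 *	    cmd->cmd_hw_err ? CRYPTO_FAILED : CRYPTO_SUCCESS);
 */
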
/*
 * This routine is used by software providers to determine
 * whether to use KM_SLEEP or KM_NOSLEEP during memory allocation.
 * Note that hardware providers can always use KM_SLEEP. So,
 * they do not need to call this routine.
 *
 * This routine can be called from user or interrupt context.
 */
int
crypto_kmflag(crypto_req_handle_t handle)
{
	return (REQHNDL2_KMFLAG(handle));
}

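/*
 * Illustrative sketch (hypothetical buffer and request names): a software
 * provider entry point can pass the request handle it received straight to
 * crypto_kmflag() when allocating scratch memory:
 *
 *	buf = kmem_alloc(buflen, crypto_kmflag(req));
 *	if (buf == NULL)
 *		return (CRYPTO_HOST_MEMORY);
 */
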
/*
 * Process the mechanism info structures specified by the provider
 * during registration. A NULL crypto_provider_info_t indicates
 * an already initialized provider descriptor.
 *
 * Mechanisms are not added to the kernel's mechanism table if the
 * provider is a logical provider.
 *
 * Returns CRYPTO_SUCCESS on success, CRYPTO_ARGUMENTS_BAD if one
 * of the specified mechanisms was malformed, or CRYPTO_HOST_MEMORY
 * if the table of mechanisms is full.
 */
static int
init_prov_mechs(crypto_provider_info_t *info, kcf_provider_desc_t *desc)
{
	uint_t mech_idx;
	uint_t cleanup_idx;
	int err = CRYPTO_SUCCESS;
	kcf_prov_mech_desc_t *pmd;
	int desc_use_count = 0;
	int mcount = desc->pd_mech_list_count;

	if (desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		if (info != NULL) {
			ASSERT(info->pi_mechanisms != NULL);
			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
			    sizeof (crypto_mech_info_t) * mcount);
		}
		return (CRYPTO_SUCCESS);
	}

	/*
	 * Copy the mechanism list from the provider info to the provider
	 * descriptor. desc->pd_mechanisms has an extra crypto_mech_info_t
	 * element if the provider has random_ops since we keep an internal
	 * mechanism, SUN_RANDOM, in this case.
	 */
	if (info != NULL) {
		if (info->pi_ops_vector->co_random_ops != NULL) {
			crypto_mech_info_t *rand_mi;

			/*
			 * Need the following check as it is possible to have
			 * a provider that implements just random_ops and has
			 * pi_mechanisms == NULL.
			 */
			if (info->pi_mechanisms != NULL) {
				bcopy(info->pi_mechanisms, desc->pd_mechanisms,
				    sizeof (crypto_mech_info_t) * (mcount - 1));
			}
			rand_mi = &desc->pd_mechanisms[mcount - 1];

			bzero(rand_mi, sizeof (crypto_mech_info_t));
			(void) strncpy(rand_mi->cm_mech_name, SUN_RANDOM,
			    CRYPTO_MAX_MECH_NAME);
			rand_mi->cm_func_group_mask = CRYPTO_FG_RANDOM;
		} else {
			ASSERT(info->pi_mechanisms != NULL);
			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
			    sizeof (crypto_mech_info_t) * mcount);
		}
	}

	/*
	 * For each mechanism supported by the provider, add the provider
	 * to the corresponding KCF mechanism mech_entry chain.
	 */
	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count; mech_idx++) {
		crypto_mech_info_t *mi = &desc->pd_mechanisms[mech_idx];

		if ((mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BITS) &&
		    (mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)) {
			err = CRYPTO_ARGUMENTS_BAD;
			break;
		}

		if (desc->pd_flags & CRYPTO_HASH_NO_UPDATE &&
		    mi->cm_func_group_mask & CRYPTO_FG_DIGEST) {
			/*
			 * We ask the provider to specify the limit
			 * per hash mechanism. But, in practice, a
			 * hardware limitation means all hash mechanisms
			 * will have the same maximum size allowed for
			 * input data. So, we make it a per provider
			 * limit to keep it simple.
			 */
			if (mi->cm_max_input_length == 0) {
				err = CRYPTO_ARGUMENTS_BAD;
				break;
			} else {
				desc->pd_hash_limit = mi->cm_max_input_length;
			}
		}

		if ((err = kcf_add_mech_provider(mech_idx, desc, &pmd)) !=
		    KCF_SUCCESS)
			break;

		if (pmd == NULL)
			continue;

		/* The provider will be used for this mechanism */
		desc_use_count++;
	}

	/*
	 * Don't allow multiple software providers with disabled mechanisms
	 * to register. Subsequent enabling of mechanisms will result in
	 * an unsupported configuration, i.e. multiple software providers
	 * per mechanism.
	 */
	if (desc_use_count == 0 && desc->pd_prov_type == CRYPTO_SW_PROVIDER)
		return (CRYPTO_ARGUMENTS_BAD);

	if (err == KCF_SUCCESS)
		return (CRYPTO_SUCCESS);

	/*
	 * An error occurred while adding the mechanism; clean up
	 * and bail.
	 */
	for (cleanup_idx = 0; cleanup_idx < mech_idx; cleanup_idx++) {
		kcf_remove_mech_provider(
		    desc->pd_mechanisms[cleanup_idx].cm_mech_name, desc);
	}

	if (err == KCF_MECH_TAB_FULL)
		return (CRYPTO_HOST_MEMORY);

	return (CRYPTO_ARGUMENTS_BAD);
}

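/*
 * Illustrative sketch (hypothetical mechanism name, other fields omitted):
 * the kind of entry a provider places in the pi_mechanisms array that is
 * copied into pd_mechanisms above. Only fields referenced in this file are
 * shown; real tables also carry key-size and mechanism-number information.
 *
 *	static crypto_mech_info_t example_mech_info[] = {
 *		{ .cm_mech_name = "CKM_EXAMPLE_DIGEST",
 *		  .cm_func_group_mask = CRYPTO_FG_DIGEST,
 *		  .cm_mech_flags = CRYPTO_KEYSIZE_UNIT_IN_BYTES }
 *	};
 */
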
/*
 * Update routine for the provider kstat. Only privileged users are
 * allowed to access this information, since it is sensitive: some
 * cryptographic attacks (e.g. traffic analysis) can make use of it.
 */
static int
kcf_prov_kstat_update(kstat_t *ksp, int rw)
{
	kcf_prov_stats_t *ks_data;
	kcf_provider_desc_t *pd = (kcf_provider_desc_t *)ksp->ks_private;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ks_data = ksp->ks_data;

	ks_data->ps_ops_total.value.ui64 = pd->pd_sched_info.ks_ndispatches;
	ks_data->ps_ops_failed.value.ui64 = pd->pd_sched_info.ks_nfails;
	ks_data->ps_ops_busy_rval.value.ui64 = pd->pd_sched_info.ks_nbusy_rval;
	ks_data->ps_ops_passed.value.ui64 =
	    pd->pd_sched_info.ks_ndispatches -
	    pd->pd_sched_info.ks_nfails -
	    pd->pd_sched_info.ks_nbusy_rval;

	return (0);
}


/*
 * Utility routine called from failure paths in crypto_register_provider()
 * and from crypto_load_soft_disabled().
 */
void
undo_register_provider(kcf_provider_desc_t *desc, boolean_t remove_prov)
{
	uint_t mech_idx;

	/* remove the provider from the mechanisms tables */
	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
	    mech_idx++) {
		kcf_remove_mech_provider(
		    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
	}

	/* remove provider from providers table */
	if (remove_prov)
		(void) kcf_prov_tab_rem_provider(desc->pd_prov_id);
}

/*
 * Utility routine called from crypto_load_soft_disabled(). Callers
 * should have done a prior undo_register_provider().
 */
void
redo_register_provider(kcf_provider_desc_t *pd)
{
	/* process the mechanisms supported by the provider */
	(void) init_prov_mechs(NULL, pd);

	/*
	 * Hold the provider in the providers table. We should not call
	 * kcf_prov_tab_add_provider() here, as the provider descriptor
	 * is still valid, which means it already has an entry in the
	 * provider table.
	 */
	KCF_PROV_REFHOLD(pd);
	KCF_PROV_IREFHOLD(pd);
}

/*
 * Add provider (p1) to another provider's array of providers (p2).
 * Hardware and logical providers use this array to cross-reference
 * each other.
 */
static void
add_provider_to_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
{
	kcf_provider_list_t *new;

	new = kmem_alloc(sizeof (kcf_provider_list_t), KM_SLEEP);
	mutex_enter(&p2->pd_lock);
	new->pl_next = p2->pd_provider_list;
	p2->pd_provider_list = new;
	KCF_PROV_IREFHOLD(p1);
	new->pl_provider = p1;
	mutex_exit(&p2->pd_lock);
}

/*
 * Remove provider (p1) from another provider's array of providers (p2).
 * Hardware and logical providers use this array to cross-reference
 * each other.
 */
static void
remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
{
	kcf_provider_list_t *pl = NULL, **prev;

	mutex_enter(&p2->pd_lock);
	for (pl = p2->pd_provider_list, prev = &p2->pd_provider_list;
	    pl != NULL; prev = &pl->pl_next, pl = pl->pl_next) {
		if (pl->pl_provider == p1) {
			break;
		}
	}

	if (pl == NULL) {
		mutex_exit(&p2->pd_lock);
		return;
	}

	/* detach and free kcf_provider_list structure */
	KCF_PROV_IREFRELE(p1);
	*prev = pl->pl_next;
	kmem_free(pl, sizeof (*pl));
	mutex_exit(&p2->pd_lock);
}

/*
 * Convert an array of logical provider handles (crypto_provider_id)
 * stored in a crypto_provider_info structure into an array of provider
 * descriptors (kcf_provider_desc_t) attached to a logical provider.
 */
static void
process_logical_providers(crypto_provider_info_t *info, kcf_provider_desc_t *hp)
{
	kcf_provider_desc_t *lp;
	crypto_provider_id_t handle;
	int count = info->pi_logical_provider_count;
	int i;

	/* add hardware provider to each logical provider */
	for (i = 0; i < count; i++) {
		handle = info->pi_logical_providers[i];
		lp = kcf_prov_tab_lookup((crypto_provider_id_t)handle);
		if (lp == NULL) {
			continue;
		}
		add_provider_to_array(hp, lp);
		hp->pd_flags |= KCF_LPROV_MEMBER;

		/*
		 * A hardware provider has to have the provider descriptor of
		 * every logical provider it belongs to, so it can be removed
		 * from the logical provider if the hardware provider
		 * unregisters from the framework.
		 */
		add_provider_to_array(lp, hp);
		KCF_PROV_REFRELE(lp);
	}
}

/*
 * This routine removes a provider from all of the logical or
 * hardware providers it belongs to, and frees the provider's
 * array of pointers to providers.
 */
static void
remove_provider(kcf_provider_desc_t *pp)
{
	kcf_provider_desc_t *p;
	kcf_provider_list_t *e, *next;

	mutex_enter(&pp->pd_lock);
	for (e = pp->pd_provider_list; e != NULL; e = next) {
		p = e->pl_provider;
		remove_provider_from_array(pp, p);
		if (p->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    p->pd_provider_list == NULL)
			p->pd_flags &= ~KCF_LPROV_MEMBER;
		KCF_PROV_IREFRELE(p);
		next = e->pl_next;
		kmem_free(e, sizeof (*e));
	}
	pp->pd_provider_list = NULL;
	mutex_exit(&pp->pd_lock);
}

/*
 * Dispatch events as needed for a provider. The is_added flag tells
 * whether the provider is registering or unregistering.
 */
void
kcf_do_notify(kcf_provider_desc_t *prov_desc, boolean_t is_added)
{
	int i;
	crypto_notify_event_change_t ec;

	ASSERT(prov_desc->pd_state > KCF_PROV_VERIFICATION_FAILED);

	/*
	 * Inform interested clients of the mechanisms becoming
	 * available/unavailable. We skip this for logical providers
	 * as they do not affect mechanisms.
	 */
	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		ec.ec_provider_type = prov_desc->pd_prov_type;
		ec.ec_change = is_added ? CRYPTO_MECH_ADDED :
		    CRYPTO_MECH_REMOVED;
		for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
			(void) strlcpy(ec.ec_mech_name,
			    prov_desc->pd_mechanisms[i].cm_mech_name,
			    CRYPTO_MAX_MECH_NAME);
			kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED, &ec);
		}
	}

	/*
	 * Inform interested clients about the new or departing provider.
	 * In the case of a logical provider, we send the event only for
	 * the logical provider and not for the underlying providers,
	 * which are identified by the KCF_LPROV_MEMBER bit.
	 */
	if (prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER ||
	    (prov_desc->pd_flags & KCF_LPROV_MEMBER) == 0) {
		kcf_walk_ntfylist(is_added ? CRYPTO_EVENT_PROVIDER_REGISTERED :
		    CRYPTO_EVENT_PROVIDER_UNREGISTERED, prov_desc);
	}
}

static void
delete_kstat(kcf_provider_desc_t *desc)
{
	/* destroy the kstat created for this provider */
	if (desc->pd_kstat != NULL) {
		kcf_provider_desc_t *kspd = desc->pd_kstat->ks_private;

		/* release reference held by desc->pd_kstat->ks_private */
		ASSERT(desc == kspd);
		kstat_delete(kspd->pd_kstat);
		desc->pd_kstat = NULL;
		KCF_PROV_REFRELE(kspd);
		KCF_PROV_IREFRELE(kspd);
	}
}