xref: /titanic_51/usr/src/uts/common/crypto/spi/kcf_spi.c (revision e07d9cb85217949d497b02d7211de8a197d2f2eb)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * This file is part of the core Kernel Cryptographic Framework.
30  * It implements the SPI functions exported to cryptographic
31  * providers.
32  */
33 
34 #include <sys/ksynch.h>
35 #include <sys/cmn_err.h>
36 #include <sys/ddi.h>
37 #include <sys/sunddi.h>
38 #include <sys/modctl.h>
39 #include <sys/crypto/common.h>
40 #include <sys/crypto/impl.h>
41 #include <sys/crypto/sched_impl.h>
42 #include <sys/crypto/spi.h>
43 #include <sys/taskq.h>
44 #include <sys/disp.h>
45 #include <sys/kstat.h>
46 #include <sys/policy.h>
47 #include <sys/cpuvar.h>
48 
49 /*
50  * Thread count and minalloc/maxalloc values to be used for taskq_create().
51  */
52 int crypto_taskq_threads = CRYPTO_TASKQ_THREADS;
53 int crypto_taskq_minalloc = CYRPTO_TASKQ_MIN;
54 int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX;
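
/*
 * These are global variables of the kcf module and may be tuned, e.g.
 * from /etc/system at boot time. A minimal sketch, assuming the usual
 * module:variable syntax applies to kcf (values are only illustrative):
 *
 *	set kcf:crypto_taskq_threads = 16
 *	set kcf:crypto_taskq_minalloc = 128
 *	set kcf:crypto_taskq_maxalloc = 65536
 */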
55 
56 static void free_provider_list(kcf_provider_list_t *);
57 static void remove_provider(kcf_provider_desc_t *);
58 static void process_logical_providers(crypto_provider_info_t *,
59     kcf_provider_desc_t *);
60 static int init_prov_mechs(crypto_provider_info_t *, kcf_provider_desc_t *);
61 static int kcf_prov_kstat_update(kstat_t *, int);
62 static void undo_register_provider_extra(kcf_provider_desc_t *);
63 static void delete_kstat(kcf_provider_desc_t *);
64 
65 static kcf_prov_stats_t kcf_stats_ks_data_template = {
66 	{ "kcf_ops_total",		KSTAT_DATA_UINT64 },
67 	{ "kcf_ops_passed",		KSTAT_DATA_UINT64 },
68 	{ "kcf_ops_failed",		KSTAT_DATA_UINT64 },
69 	{ "kcf_ops_returned_busy",	KSTAT_DATA_UINT64 }
70 };
71 
72 #define	KCF_SPI_COPY_OPS(src, dst, ops) if ((src)->ops != NULL) \
73 	*((dst)->ops) = *((src)->ops);
74 
75 /*
76  * Copy an ops vector from src to dst. Used during provider registration
77  * to copy the ops vector from the provider info structure to the
78  * provider descriptor maintained by KCF.
79  * Copying the ops vector specified by the provider is needed since the
80  * framework does not require the provider info structure to be
81  * persistent.
82  */
83 static void
84 copy_ops_vector_v1(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
85 {
86 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_control_ops);
87 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
88 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_cipher_ops);
89 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mac_ops);
90 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_sign_ops);
91 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_verify_ops);
92 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_ops);
93 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_cipher_mac_ops);
94 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_random_ops);
95 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_session_ops);
96 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_object_ops);
97 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_key_ops);
98 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_provider_ops);
99 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_ctx_ops);
100 }
101 
102 static void
103 copy_ops_vector_v2(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
104 {
105 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops);
106 }
107 
108 static void
109 copy_ops_vector_v3(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
110 {
111 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_nostore_key_ops);
112 }
113 
114 /*
115  * This routine is used to add cryptographic providers to the KCF framework.
116  * Providers pass a crypto_provider_info structure to crypto_register_provider()
117  * and get back a handle.  The crypto_provider_info structure contains a
118  * list of mechanisms supported by the provider and an ops vector containing
119  * provider entry points.  Hardware providers call this routine in their attach
120  * routines.  Software providers call this routine in their _init() routine.
121  */
122 int
123 crypto_register_provider(crypto_provider_info_t *info,
124     crypto_kcf_provider_handle_t *handle)
125 {
126 	int need_verify;
127 	struct modctl *mcp;
128 	char *name;
129 	char ks_name[KSTAT_STRLEN];
130 
131 	kcf_provider_desc_t *prov_desc = NULL;
132 	int ret = CRYPTO_ARGUMENTS_BAD;
133 
134 	if (info->pi_interface_version > CRYPTO_SPI_VERSION_3)
135 		return (CRYPTO_VERSION_MISMATCH);
136 
137 	/*
138 	 * Check provider type, must be software, hardware, or logical.
139 	 */
140 	if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
141 	    info->pi_provider_type != CRYPTO_SW_PROVIDER &&
142 	    info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
143 		return (CRYPTO_ARGUMENTS_BAD);
144 
145 	/*
146 	 * Allocate and initialize a new provider descriptor. We also
147 	 * hold it and release it when done.
148 	 */
149 	prov_desc = kcf_alloc_provider_desc(info);
150 	KCF_PROV_REFHOLD(prov_desc);
151 
152 	prov_desc->pd_prov_type = info->pi_provider_type;
153 
154 	/* provider-private handle, opaque to KCF */
155 	prov_desc->pd_prov_handle = info->pi_provider_handle;
156 
157 	/* copy provider description string */
158 	if (info->pi_provider_description != NULL) {
159 		/*
160 		 * pi_provider_description is a string that can contain
161 		 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
162 		 * INCLUDING the terminating null character. A bcopy()
163 		 * is necessary here as pd_description should not have
164 		 * a null character. See comments in kcf_alloc_provider_desc()
165 		 * for details on pd_description field.
166 		 */
167 		bcopy(info->pi_provider_description, prov_desc->pd_description,
168 		    min(strlen(info->pi_provider_description),
169 		    CRYPTO_PROVIDER_DESCR_MAX_LEN));
170 	}
171 
172 	if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
173 		if (info->pi_ops_vector == NULL) {
174 			goto bail;
175 		}
176 		copy_ops_vector_v1(info->pi_ops_vector,
177 		    prov_desc->pd_ops_vector);
178 		if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
179 			copy_ops_vector_v2(info->pi_ops_vector,
180 			    prov_desc->pd_ops_vector);
181 			prov_desc->pd_flags = info->pi_flags;
182 		}
183 		if (info->pi_interface_version == CRYPTO_SPI_VERSION_3) {
184 			copy_ops_vector_v3(info->pi_ops_vector,
185 			    prov_desc->pd_ops_vector);
186 		}
187 	}
188 
189 	/* object_ops and nostore_key_ops are mutually exclusive */
190 	if (prov_desc->pd_ops_vector->co_object_ops &&
191 	    prov_desc->pd_ops_vector->co_nostore_key_ops) {
192 		goto bail;
193 	}
194 	/*
195 	 * For software providers, copy the module name and module ID.
196 	 * For hardware providers, copy the driver name and instance.
197 	 */
198 	switch (info->pi_provider_type) {
199 	case  CRYPTO_SW_PROVIDER:
200 		if (info->pi_provider_dev.pd_sw == NULL)
201 			goto bail;
202 
203 		if ((mcp = mod_getctl(info->pi_provider_dev.pd_sw)) == NULL)
204 			goto bail;
205 
206 		prov_desc->pd_module_id = mcp->mod_id;
207 		name = mcp->mod_modname;
208 		break;
209 
210 	case CRYPTO_HW_PROVIDER:
211 	case CRYPTO_LOGICAL_PROVIDER:
212 		if (info->pi_provider_dev.pd_hw == NULL)
213 			goto bail;
214 
215 		prov_desc->pd_instance =
216 		    ddi_get_instance(info->pi_provider_dev.pd_hw);
217 		name = (char *)ddi_driver_name(info->pi_provider_dev.pd_hw);
218 		break;
219 	}
220 	if (name == NULL)
221 		goto bail;
222 
223 	prov_desc->pd_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
224 	(void) strcpy(prov_desc->pd_name, name);
225 
226 	if ((prov_desc->pd_mctlp = kcf_get_modctl(info)) == NULL)
227 		goto bail;
228 
229 	/* process the mechanisms supported by the provider */
230 	if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
231 		goto bail;
232 
233 	/*
234 	 * Add provider to providers tables, also sets the descriptor
235 	 * pd_prov_id field.
236 	 */
237 	if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
238 		undo_register_provider(prov_desc, B_FALSE);
239 		goto bail;
240 	}
241 
242 	if ((need_verify = kcf_need_signature_verification(prov_desc)) == -1) {
243 		undo_register_provider(prov_desc, B_TRUE);
244 		ret = CRYPTO_MODVERIFICATION_FAILED;
245 		goto bail;
246 	}
247 
248 	/*
249 	 * We create a taskq only for a hardware provider. The global
250 	 * software queue is used for software providers. We handle ordering
251 	 * of multi-part requests in the taskq routine. So, it is safe to
252 	 * have multiple threads for the taskq. We pass TASKQ_PREPOPULATE flag
253 	 * to keep some entries cached to improve performance.
254 	 */
255 	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
256 		prov_desc->pd_sched_info.ks_taskq = taskq_create("kcf_taskq",
257 		    crypto_taskq_threads, minclsyspri,
258 		    crypto_taskq_minalloc, crypto_taskq_maxalloc,
259 		    TASKQ_PREPOPULATE);
260 	else
261 		prov_desc->pd_sched_info.ks_taskq = NULL;
262 
263 	/* no kernel session to logical providers */
264 	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
265 		/*
266 		 * Open a session for session-oriented providers. This session
267 		 * is used for all kernel consumers. This is fine as a provider
268 		 * is required to support multiple thread access to a session.
269 		 * We can do this only after the taskq has been created as we
270 		 * do a kcf_submit_request() to open the session.
271 		 */
272 		if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
273 			kcf_req_params_t params;
274 
275 			KCF_WRAP_SESSION_OPS_PARAMS(&params,
276 			    KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
277 			    CRYPTO_USER, NULL, 0, prov_desc);
278 			ret = kcf_submit_request(prov_desc, NULL, NULL, &params,
279 			    B_FALSE);
280 
281 			if (ret != CRYPTO_SUCCESS) {
282 				undo_register_provider(prov_desc, B_TRUE);
283 				ret = CRYPTO_FAILED;
284 				goto bail;
285 			}
286 		}
287 	}
288 
289 	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
290 		/*
291 		 * Create the kstat for this provider. There is a kstat
292 		 * installed for each successfully registered provider.
293 		 * This kstat is deleted, when the provider unregisters.
294 		 */
295 		if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
296 			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%s",
297 			    prov_desc->pd_name, "provider_stats");
298 		} else {
299 			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%d_%u_%s",
300 			    prov_desc->pd_name, prov_desc->pd_instance,
301 			    prov_desc->pd_prov_id, "provider_stats");
302 		}
303 
304 		prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
305 		    KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
306 		    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
307 
308 		if (prov_desc->pd_kstat != NULL) {
309 			bcopy(&kcf_stats_ks_data_template,
310 			    &prov_desc->pd_ks_data,
311 			    sizeof (kcf_stats_ks_data_template));
312 			prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
313 			KCF_PROV_REFHOLD(prov_desc);
314 			KCF_PROV_IREFHOLD(prov_desc);
315 			prov_desc->pd_kstat->ks_private = prov_desc;
316 			prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
317 			kstat_install(prov_desc->pd_kstat);
318 		}
319 	}
320 
321 	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
322 		process_logical_providers(info, prov_desc);
323 
324 	if (need_verify == 1) {
325 		/* kcf_verify_signature routine will release these holds */
326 		KCF_PROV_REFHOLD(prov_desc);
327 		KCF_PROV_IREFHOLD(prov_desc);
328 
329 		if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER) {
330 			/*
331 			 * It is not safe to make the door upcall to kcfd from
332 			 * this context since the kcfd thread could reenter
333 			 * devfs. So, we dispatch a taskq job to do the
334 			 * verification and return to the provider.
335 			 */
336 			(void) taskq_dispatch(system_taskq,
337 			    kcf_verify_signature, (void *)prov_desc, TQ_SLEEP);
338 		} else if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
339 			kcf_verify_signature(prov_desc);
340 			if (prov_desc->pd_state ==
341 			    KCF_PROV_VERIFICATION_FAILED) {
342 				undo_register_provider_extra(prov_desc);
343 				ret = CRYPTO_MODVERIFICATION_FAILED;
344 				goto bail;
345 			}
346 		}
347 	} else {
348 		mutex_enter(&prov_desc->pd_lock);
349 		prov_desc->pd_state = KCF_PROV_READY;
350 		mutex_exit(&prov_desc->pd_lock);
351 		kcf_do_notify(prov_desc, B_TRUE);
352 	}
353 
354 	*handle = prov_desc->pd_kcf_prov_handle;
355 	ret = CRYPTO_SUCCESS;
356 
357 bail:
358 	KCF_PROV_REFRELE(prov_desc);
359 	return (ret);
360 }
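
/*
 * Illustrative sketch (not part of the original code): a software
 * provider's _init() routine might call crypto_register_provider()
 * roughly as shown below. All "example_*" and "EXAMPLE_*" names, the
 * ops vector and the mechanism table are hypothetical; a real provider
 * supplies its own crypto_ops_t entry points and crypto_mech_info_t
 * table.
 *
 *	static crypto_kcf_provider_handle_t example_prov_handle;
 *
 *	int
 *	_init(void)
 *	{
 *		crypto_provider_info_t info;
 *		int ret;
 *
 *		if ((ret = mod_install(&example_modlinkage)) != 0)
 *			return (ret);
 *
 *		bzero(&info, sizeof (info));
 *		info.pi_interface_version = CRYPTO_SPI_VERSION_1;
 *		info.pi_provider_description = "example software provider";
 *		info.pi_provider_type = CRYPTO_SW_PROVIDER;
 *		info.pi_provider_dev.pd_sw = &example_modlinkage;
 *		info.pi_ops_vector = &example_crypto_ops;
 *		info.pi_mech_list_count = EXAMPLE_MECH_COUNT;
 *		info.pi_mechanisms = example_mech_info_tab;
 *
 *		if (crypto_register_provider(&info, &example_prov_handle) !=
 *		    CRYPTO_SUCCESS) {
 *			(void) mod_remove(&example_modlinkage);
 *			return (EACCES);
 *		}
 *
 *		return (0);
 *	}
 */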
361 
362 /*
363  * This routine is used to notify the framework when a provider is being
364  * removed.  Hardware providers call this routine in their detach routines.
365  * Software providers call this routine in their _fini() routine.
366  */
367 int
368 crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
369 {
370 	uint_t mech_idx;
371 	kcf_provider_desc_t *desc;
372 	kcf_prov_state_t saved_state;
373 
374 	/* lookup provider descriptor */
375 	if ((desc = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
376 		return (CRYPTO_UNKNOWN_PROVIDER);
377 
378 	mutex_enter(&desc->pd_lock);
379 	/*
380 	 * Check if any other thread is disabling or removing
381 	 * this provider. We return if this is the case.
382 	 */
383 	if (desc->pd_state >= KCF_PROV_DISABLED) {
384 		mutex_exit(&desc->pd_lock);
385 		/* Release reference held by kcf_prov_tab_lookup(). */
386 		KCF_PROV_REFRELE(desc);
387 		return (CRYPTO_BUSY);
388 	}
389 
390 	saved_state = desc->pd_state;
391 	desc->pd_state = KCF_PROV_REMOVED;
392 
393 	if (saved_state == KCF_PROV_BUSY) {
394 		/*
395 		 * The per-provider taskq thread may be waiting. We
396 		 * signal it so that it can start failing requests.
397 		 * Note that we do not need a cv_broadcast() as we keep
398 		 * only a single thread per taskq.
399 		 */
400 		cv_signal(&desc->pd_resume_cv);
401 	}
402 
403 	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
404 		/*
405 		 * Check if this provider is currently being used.
406 		 * pd_irefcnt is the number of holds from the internal
407 		 * structures. We add one to account for the above lookup.
408 		 */
409 		if (desc->pd_refcnt > desc->pd_irefcnt + 1) {
410 			desc->pd_state = saved_state;
411 			mutex_exit(&desc->pd_lock);
412 			/* Release reference held by kcf_prov_tab_lookup(). */
413 			KCF_PROV_REFRELE(desc);
414 			/*
415 			 * The administrator presumably will stop the clients
416 			 * thus removing the holds, when they get the busy
417 			 * return value.  Any retry will succeed then.
418 			 */
419 			return (CRYPTO_BUSY);
420 		}
421 	}
422 	mutex_exit(&desc->pd_lock);
423 
424 	if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
425 		remove_provider(desc);
426 	}
427 
428 	if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
429 		/* remove the provider from the mechanisms tables */
430 		for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
431 		    mech_idx++) {
432 			kcf_remove_mech_provider(
433 			    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
434 		}
435 	}
436 
437 	/* remove provider from providers table */
438 	if (kcf_prov_tab_rem_provider((crypto_provider_id_t)handle) !=
439 	    CRYPTO_SUCCESS) {
440 		/* Release reference held by kcf_prov_tab_lookup(). */
441 		KCF_PROV_REFRELE(desc);
442 		return (CRYPTO_UNKNOWN_PROVIDER);
443 	}
444 
445 	delete_kstat(desc);
446 
447 	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
448 		/* Release reference held by kcf_prov_tab_lookup(). */
449 		KCF_PROV_REFRELE(desc);
450 
451 		/*
452 		 * Wait till the existing requests complete.
453 		 */
454 		mutex_enter(&desc->pd_lock);
455 		while (desc->pd_state != KCF_PROV_FREED)
456 			cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
457 		mutex_exit(&desc->pd_lock);
458 	} else {
459 		/*
460 		 * Wait until requests that have been sent to the provider
461 		 * complete.
462 		 */
463 		mutex_enter(&desc->pd_lock);
464 		while (desc->pd_irefcnt > 0)
465 			cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
466 		mutex_exit(&desc->pd_lock);
467 	}
468 
469 	kcf_do_notify(desc, B_FALSE);
470 
471 	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
472 		/*
473 		 * This is the only place where kcf_free_provider_desc()
474 		 * is called directly. KCF_PROV_REFRELE() should free the
475 		 * structure in all other places.
476 		 */
477 		ASSERT(desc->pd_state == KCF_PROV_FREED &&
478 		    desc->pd_refcnt == 0);
479 		kcf_free_provider_desc(desc);
480 	} else {
481 		KCF_PROV_REFRELE(desc);
482 	}
483 
484 	return (CRYPTO_SUCCESS);
485 }
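
/*
 * Illustrative sketch (not part of the original code): a software
 * provider typically calls crypto_unregister_provider() from its
 * _fini() routine and refuses to unload while the framework still has
 * references to it (CRYPTO_BUSY). The "example_*" names are
 * hypothetical and match the registration sketch above.
 *
 *	int
 *	_fini(void)
 *	{
 *		if (crypto_unregister_provider(example_prov_handle) !=
 *		    CRYPTO_SUCCESS)
 *			return (EBUSY);
 *
 *		return (mod_remove(&example_modlinkage));
 *	}
 */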
486 
487 /*
488  * This routine is used to notify the framework that the state of
489  * a cryptographic provider has changed. Valid state codes are:
490  *
491  * CRYPTO_PROVIDER_READY
492  *	The provider indicates that it can process more requests. A provider
493  *	sends this event if it had previously notified us with a
494  *	CRYPTO_PROVIDER_BUSY.
495  *
496  * CRYPTO_PROVIDER_BUSY
497  *	The provider cannot take any more requests.
498  *
499  * CRYPTO_PROVIDER_FAILED
500  *	The provider encountered an internal error. The framework will not
501  *	send any more requests to the provider. The provider may send a
502  *	CRYPTO_PROVIDER_READY notification if it recovers from the error.
503  *
504  * This routine can be called from user or interrupt context.
505  */
506 void
507 crypto_provider_notification(crypto_kcf_provider_handle_t handle, uint_t state)
508 {
509 	kcf_provider_desc_t *pd;
510 
511 	/* lookup the provider from the given handle */
512 	if ((pd = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
513 		return;
514 
515 	mutex_enter(&pd->pd_lock);
516 
517 	if (pd->pd_state <= KCF_PROV_VERIFICATION_FAILED)
518 		goto out;
519 
520 	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
521 		cmn_err(CE_WARN, "crypto_provider_notification: "
522 		    "logical provider (%x) ignored\n", handle);
523 		goto out;
524 	}
525 	switch (state) {
526 	case CRYPTO_PROVIDER_READY:
527 		switch (pd->pd_state) {
528 		case KCF_PROV_BUSY:
529 			pd->pd_state = KCF_PROV_READY;
530 			/*
531 			 * Signal the per-provider taskq thread that it
532 			 * can start submitting requests. Note that we do
533 			 * not need a cv_broadcast() as we keep only a
534 			 * single thread per taskq.
535 			 */
536 			cv_signal(&pd->pd_resume_cv);
537 			break;
538 
539 		case KCF_PROV_FAILED:
540 			/*
541 			 * The provider recovered from the error. Let us
542 			 * use it now.
543 			 */
544 			pd->pd_state = KCF_PROV_READY;
545 			break;
546 		}
547 		break;
548 
549 	case CRYPTO_PROVIDER_BUSY:
550 		switch (pd->pd_state) {
551 		case KCF_PROV_READY:
552 			pd->pd_state = KCF_PROV_BUSY;
553 			break;
554 		}
555 		break;
556 
557 	case CRYPTO_PROVIDER_FAILED:
558 		/*
559 		 * We note the failure and return. The per-provider taskq
560 		 * thread checks this flag and starts failing the
561 		 * requests, if it is set. See process_req_hwp() for details.
562 		 */
563 		switch (pd->pd_state) {
564 		case KCF_PROV_READY:
565 			pd->pd_state = KCF_PROV_FAILED;
566 			break;
567 
568 		case KCF_PROV_BUSY:
569 			pd->pd_state = KCF_PROV_FAILED;
570 			/*
571 			 * The per-provider taskq thread may be waiting. We
572 			 * signal it so that it can start failing requests.
573 			 */
574 			cv_signal(&pd->pd_resume_cv);
575 			break;
576 		}
577 		break;
578 	}
579 out:
580 	mutex_exit(&pd->pd_lock);
581 	KCF_PROV_REFRELE(pd);
582 }
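
/*
 * Illustrative sketch (not part of the original code): a hardware
 * provider whose command queue fills up might throttle the framework
 * and later resume it. The "example_sc" soft-state structure and its
 * fields are hypothetical.
 *
 *	if (example_sc->sc_free_slots == 0)
 *		crypto_provider_notification(example_sc->sc_prov_handle,
 *		    CRYPTO_PROVIDER_BUSY);
 *
 * and, once enough entries have drained:
 *
 *	crypto_provider_notification(example_sc->sc_prov_handle,
 *	    CRYPTO_PROVIDER_READY);
 */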
583 
584 /*
585  * This routine is used to notify the framework of the result of
586  * an asynchronous request handled by a provider. Valid error
587  * codes are the same as the CRYPTO_* errors defined in common.h.
588  *
589  * This routine can be called from user or interrupt context.
590  */
591 void
592 crypto_op_notification(crypto_req_handle_t handle, int error)
593 {
594 	kcf_call_type_t ctype;
595 
596 	if ((ctype = GET_REQ_TYPE(handle)) == CRYPTO_SYNCH) {
597 		kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)handle;
598 
599 		if (error != CRYPTO_SUCCESS)
600 			sreq->sn_provider->pd_sched_info.ks_nfails++;
601 		KCF_PROV_IREFRELE(sreq->sn_provider);
602 		kcf_sop_done(sreq, error);
603 	} else {
604 		kcf_areq_node_t *areq = (kcf_areq_node_t *)handle;
605 
606 		ASSERT(ctype == CRYPTO_ASYNCH);
607 		if (error != CRYPTO_SUCCESS)
608 			areq->an_provider->pd_sched_info.ks_nfails++;
609 		KCF_PROV_IREFRELE(areq->an_provider);
610 		kcf_aop_done(areq, error);
611 	}
612 }
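
/*
 * Illustrative sketch (not part of the original code): a hardware
 * provider that completed a job asynchronously reports the result from
 * its interrupt handler using the crypto_req_handle_t it was given when
 * the entry point was invoked. The "example_*" names are hypothetical.
 *
 *	static uint_t
 *	example_intr(caddr_t arg)
 *	{
 *		example_job_t *job = example_get_completed_job(arg);
 *
 *		if (job == NULL)
 *			return (DDI_INTR_UNCLAIMED);
 *
 *		crypto_op_notification(job->ej_req_handle,
 *		    job->ej_hw_error ? CRYPTO_DEVICE_ERROR : CRYPTO_SUCCESS);
 *		return (DDI_INTR_CLAIMED);
 *	}
 */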
613 
614 /*
615  * This routine is used by software providers to determine
616  * whether to use KM_SLEEP or KM_NOSLEEP during memory allocation.
617  * Note that hardware providers can always use KM_SLEEP. So,
618  * they do not need to call this routine.
619  *
620  * This routine can be called from user or interrupt context.
621  */
622 int
623 crypto_kmflag(crypto_req_handle_t handle)
624 {
625 	return (REQHNDL2_KMFLAG(handle));
626 }
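
/*
 * Illustrative sketch (not part of the original code): a software
 * provider entry point passes the request handle it received to
 * crypto_kmflag() so that its allocations do not sleep when the request
 * was submitted from a context that must not block:
 *
 *	buf = kmem_alloc(buflen, crypto_kmflag(req));
 *	if (buf == NULL)
 *		return (CRYPTO_HOST_MEMORY);
 */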
627 
628 /*
629  * Process the mechanism info structures specified by the provider
630  * during registration. A NULL crypto_provider_info_t indicates
631  * an already initialized provider descriptor.
632  *
633  * Mechanisms are not added to the kernel's mechanism table if the
634  * provider is a logical provider.
635  *
636  * Returns CRYPTO_SUCCESS on success, CRYPTO_ARGUMENTS_BAD if one
637  * of the specified mechanisms was malformed, or CRYPTO_HOST_MEMORY
638  * if the table of mechanisms is full.
639  */
640 static int
641 init_prov_mechs(crypto_provider_info_t *info, kcf_provider_desc_t *desc)
642 {
643 	uint_t mech_idx;
644 	uint_t cleanup_idx;
645 	int err = CRYPTO_SUCCESS;
646 	kcf_prov_mech_desc_t *pmd;
647 	int desc_use_count = 0;
648 	int mcount = desc->pd_mech_list_count;
649 
650 	if (desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
651 		if (info != NULL) {
652 			ASSERT(info->pi_mechanisms != NULL);
653 			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
654 			    sizeof (crypto_mech_info_t) * mcount);
655 		}
656 		return (CRYPTO_SUCCESS);
657 	}
658 
659 	/*
660 	 * Copy the mechanism list from the provider info to the provider
661 	 * descriptor. desc->pd_mechanisms has an extra crypto_mech_info_t
662 	 * element if the provider has random_ops since we keep an internal
663 	 * mechanism, SUN_RANDOM, in this case.
664 	 */
665 	if (info != NULL) {
666 		if (info->pi_ops_vector->co_random_ops != NULL) {
667 			crypto_mech_info_t *rand_mi;
668 
669 			/*
670 			 * Need the following check as it is possible to have
671 			 * a provider that implements just random_ops and has
672 			 * pi_mechanisms == NULL.
673 			 */
674 			if (info->pi_mechanisms != NULL) {
675 				bcopy(info->pi_mechanisms, desc->pd_mechanisms,
676 				    sizeof (crypto_mech_info_t) * (mcount - 1));
677 			}
678 			rand_mi = &desc->pd_mechanisms[mcount - 1];
679 
680 			bzero(rand_mi, sizeof (crypto_mech_info_t));
681 			(void) strncpy(rand_mi->cm_mech_name, SUN_RANDOM,
682 			    CRYPTO_MAX_MECH_NAME);
683 			rand_mi->cm_func_group_mask = CRYPTO_FG_RANDOM;
684 		} else {
685 			ASSERT(info->pi_mechanisms != NULL);
686 			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
687 			    sizeof (crypto_mech_info_t) * mcount);
688 		}
689 	}
690 
691 	/*
692  * For each mechanism supported by the provider, add the provider
693 	 * to the corresponding KCF mechanism mech_entry chain.
694 	 */
695 	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count; mech_idx++) {
696 		crypto_mech_info_t *mi = &desc->pd_mechanisms[mech_idx];
697 
698 		if ((mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BITS) &&
699 		    (mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)) {
700 			err = CRYPTO_ARGUMENTS_BAD;
701 			break;
702 		}
703 
704 		if (desc->pd_flags & CRYPTO_HASH_NO_UPDATE &&
705 		    mi->cm_func_group_mask & CRYPTO_FG_DIGEST) {
706 			/*
707 			 * We ask the provider to specify the limit
708 			 * per hash mechanism. But, in practice, a
709 			 * hardware limitation means all hash mechanisms
710 			 * will have the same maximum size allowed for
711 			 * input data. So, we make it a per provider
712 			 * limit to keep it simple.
713 			 */
714 			if (mi->cm_max_input_length == 0) {
715 				err = CRYPTO_ARGUMENTS_BAD;
716 				break;
717 			} else {
718 				desc->pd_hash_limit = mi->cm_max_input_length;
719 			}
720 		}
721 
722 		if (kcf_add_mech_provider(mech_idx, desc, &pmd) != KCF_SUCCESS)
723 			break;
724 
725 		if (pmd == NULL)
726 			continue;
727 
728 		/* The provider will be used for this mechanism */
729 		desc_use_count++;
730 	}
731 
732 	/*
733 	 * Don't allow multiple software providers with disabled mechanisms
734 	 * to register. Subsequent enabling of mechanisms will result in
735 	 * an unsupported configuration, i.e. multiple software providers
736 	 * per mechanism.
737 	 */
738 	if (desc_use_count == 0 && desc->pd_prov_type == CRYPTO_SW_PROVIDER)
739 		return (CRYPTO_ARGUMENTS_BAD);
740 
741 	if (err == KCF_SUCCESS)
742 		return (CRYPTO_SUCCESS);
743 
744 	/*
745 	 * An error occurred while adding the mechanism, cleanup
746 	 * and bail.
747 	 */
748 	for (cleanup_idx = 0; cleanup_idx < mech_idx; cleanup_idx++) {
749 		kcf_remove_mech_provider(
750 		    desc->pd_mechanisms[cleanup_idx].cm_mech_name, desc);
751 	}
752 
753 	if (err == KCF_MECH_TAB_FULL)
754 		return (CRYPTO_HOST_MEMORY);
755 
756 	return (CRYPTO_ARGUMENTS_BAD);
757 }
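
/*
 * Illustrative sketch (not part of the original code): one entry of the
 * crypto_mech_info_t table a provider supplies in pi_mechanisms might
 * look roughly as follows (the "example_*"/"EXAMPLE_*" names and the
 * key-size limits are hypothetical). Note that, as checked above,
 * CRYPTO_KEYSIZE_UNIT_IN_BITS and CRYPTO_KEYSIZE_UNIT_IN_BYTES are
 * mutually exclusive in cm_mech_flags.
 *
 *	static crypto_mech_info_t example_mech_info_tab[] = {
 *		{SUN_CKM_AES_CBC, EXAMPLE_AES_CBC_MECH_INFO_TYPE,
 *		    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
 *		    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
 *		    16, 32, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
 *	};
 */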
758 
759 /*
760  * Update routine for kstat. Only privileged users are allowed to
761  * access this information, since it is sensitive: some cryptographic
762  * attacks (e.g. traffic analysis) could make use of these per-provider
763  * statistics.
764  */
765 static int
766 kcf_prov_kstat_update(kstat_t *ksp, int rw)
767 {
768 	kcf_prov_stats_t *ks_data;
769 	kcf_provider_desc_t *pd = (kcf_provider_desc_t *)ksp->ks_private;
770 
771 	if (rw == KSTAT_WRITE)
772 		return (EACCES);
773 
774 	ks_data = ksp->ks_data;
775 
776 	if (secpolicy_sys_config(CRED(), B_TRUE) != 0) {
777 		ks_data->ps_ops_total.value.ui64 = 0;
778 		ks_data->ps_ops_passed.value.ui64 = 0;
779 		ks_data->ps_ops_failed.value.ui64 = 0;
780 		ks_data->ps_ops_busy_rval.value.ui64 = 0;
781 	} else {
782 		ks_data->ps_ops_total.value.ui64 =
783 		    pd->pd_sched_info.ks_ndispatches;
784 		ks_data->ps_ops_failed.value.ui64 =
785 		    pd->pd_sched_info.ks_nfails;
786 		ks_data->ps_ops_busy_rval.value.ui64 =
787 		    pd->pd_sched_info.ks_nbusy_rval;
788 		ks_data->ps_ops_passed.value.ui64 =
789 		    pd->pd_sched_info.ks_ndispatches -
790 		    pd->pd_sched_info.ks_nfails -
791 		    pd->pd_sched_info.ks_nbusy_rval;
792 	}
793 
794 	return (0);
795 }
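
/*
 * The statistics published above can be read from user level with
 * kstat(1M); for example, for a registered software provider named
 * "example" (a hypothetical name), a privileged user could run:
 *
 *	kstat -m kcf -c crypto -n example_provider_stats
 *
 * Unprivileged readers see all counters as zero because of the
 * secpolicy_sys_config() check above.
 */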
796 
797 
798 /*
799  * Utility routine called from failure paths in crypto_register_provider()
800  * and from crypto_load_soft_disabled().
801  */
802 void
803 undo_register_provider(kcf_provider_desc_t *desc, boolean_t remove_prov)
804 {
805 	uint_t mech_idx;
806 
807 	/* remove the provider from the mechanisms tables */
808 	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
809 	    mech_idx++) {
810 		kcf_remove_mech_provider(
811 		    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
812 	}
813 
814 	/* remove provider from providers table */
815 	if (remove_prov)
816 		(void) kcf_prov_tab_rem_provider(desc->pd_prov_id);
817 }
818 
819 static void
820 undo_register_provider_extra(kcf_provider_desc_t *desc)
821 {
822 	delete_kstat(desc);
823 	undo_register_provider(desc, B_TRUE);
824 }
825 
826 /*
827  * Utility routine called from crypto_load_soft_disabled(). Callers
828  * should have done a prior undo_register_provider().
829  */
830 void
831 redo_register_provider(kcf_provider_desc_t *pd)
832 {
833 	/* process the mechanisms supported by the provider */
834 	(void) init_prov_mechs(NULL, pd);
835 
836 	/*
837 	 * Hold provider in providers table. We should not call
838 	 * kcf_prov_tab_add_provider() here as the provider descriptor
839 	 * is still valid which means it has an entry in the provider
840 	 * table.
841 	 */
842 	KCF_PROV_REFHOLD(pd);
843 	KCF_PROV_IREFHOLD(pd);
844 }
845 
846 /*
847  * Add provider (p1) to another provider's array of providers (p2).
848  * Hardware and logical providers use this array to cross-reference
849  * each other.
850  */
851 static void
852 add_provider_to_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
853 {
854 	kcf_provider_list_t *new;
855 
856 	new = kmem_alloc(sizeof (kcf_provider_list_t), KM_SLEEP);
857 	mutex_enter(&p2->pd_lock);
858 	new->pl_next = p2->pd_provider_list;
859 	p2->pd_provider_list = new;
860 	KCF_PROV_IREFHOLD(p1);
861 	new->pl_provider = p1;
862 	mutex_exit(&p2->pd_lock);
863 }
864 
865 /*
866  * Remove provider (p1) from another provider's array of providers (p2).
867  * Hardware and logical providers use this array to cross-reference
868  * each other.
869  */
870 static void
871 remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
872 {
873 
874 	kcf_provider_list_t *pl = NULL, **prev;
875 
876 	mutex_enter(&p2->pd_lock);
877 	for (pl = p2->pd_provider_list, prev = &p2->pd_provider_list;
878 	    pl != NULL; prev = &pl->pl_next, pl = pl->pl_next) {
879 		if (pl->pl_provider == p1) {
880 			break;
881 		}
882 	}
883 
884 	if (pl == NULL) {
885 		mutex_exit(&p2->pd_lock);
886 		return;
887 	}
888 
889 	/* detach and free kcf_provider_list structure */
890 	KCF_PROV_IREFRELE(p1);
891 	*prev = pl->pl_next;
892 	kmem_free(pl, sizeof (*pl));
893 	mutex_exit(&p2->pd_lock);
894 }
895 
896 /*
897  * Convert an array of logical provider handles (crypto_provider_id)
898  * stored in a crypto_provider_info structure into an array of provider
899  * descriptors (kcf_provider_desc_t) attached to a logical provider.
900  */
901 static void
902 process_logical_providers(crypto_provider_info_t *info, kcf_provider_desc_t *hp)
903 {
904 	kcf_provider_desc_t *lp;
905 	crypto_provider_id_t handle;
906 	int count = info->pi_logical_provider_count;
907 	int i;
908 
909 	/* add hardware provider to each logical provider */
910 	for (i = 0; i < count; i++) {
911 		handle = info->pi_logical_providers[i];
912 		lp = kcf_prov_tab_lookup((crypto_provider_id_t)handle);
913 		if (lp == NULL) {
914 			continue;
915 		}
916 		add_provider_to_array(hp, lp);
917 		hp->pd_flags |= KCF_LPROV_MEMBER;
918 
919 		/*
920 		 * A hardware provider has to have the provider descriptor of
921 		 * every logical provider it belongs to, so it can be removed
922 		 * from the logical provider if the hardware provider
923 		 * unregisters from the framework.
924 		 */
925 		add_provider_to_array(lp, hp);
926 		KCF_PROV_REFRELE(lp);
927 	}
928 }
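
/*
 * Illustrative sketch (not part of the original code): a hardware
 * provider that belongs to a logical provider lists the logical
 * provider's registration handle in its crypto_provider_info_t, e.g.:
 *
 *	info.pi_logical_provider_count = 1;
 *	info.pi_logical_providers = &example_logical_prov_handle;
 *
 * where example_logical_prov_handle (a hypothetical name) is the handle
 * the logical provider obtained from its own crypto_register_provider()
 * call.
 */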
929 
930 /*
931  * This routine removes a provider from all of the logical or
932  * hardware providers it belongs to, and frees the provider's
933  * array of pointers to providers.
934  */
935 static void
936 remove_provider(kcf_provider_desc_t *pp)
937 {
938 	kcf_provider_desc_t *p;
939 	kcf_provider_list_t *e, *next;
940 
941 	mutex_enter(&pp->pd_lock);
942 	for (e = pp->pd_provider_list; e != NULL; e = next) {
943 		p = e->pl_provider;
944 		remove_provider_from_array(pp, p);
945 		if (p->pd_prov_type == CRYPTO_HW_PROVIDER &&
946 		    p->pd_provider_list == NULL)
947 			p->pd_flags &= ~KCF_LPROV_MEMBER;
948 		KCF_PROV_IREFRELE(p);
949 		next = e->pl_next;
950 		kmem_free(e, sizeof (*e));
951 	}
952 	pp->pd_provider_list = NULL;
953 	mutex_exit(&pp->pd_lock);
954 }
955 
956 /*
957  * Dispatch events as needed for a provider. is_added flag tells
958  * whether the provider is registering or unregistering.
959  */
960 void
961 kcf_do_notify(kcf_provider_desc_t *prov_desc, boolean_t is_added)
962 {
963 	int i;
964 	crypto_notify_event_change_t ec;
965 
966 	ASSERT(prov_desc->pd_state > KCF_PROV_VERIFICATION_FAILED);
967 
968 	/*
969 	 * Inform interested clients of the mechanisms becoming
970 	 * available/unavailable. We skip this for logical providers
971 	 * as they do not affect mechanisms.
972 	 */
973 	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
974 		ec.ec_provider_type = prov_desc->pd_prov_type;
975 		ec.ec_change = is_added ? CRYPTO_MECH_ADDED :
976 		    CRYPTO_MECH_REMOVED;
977 		for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
978 			/* Skip any mechanisms not allowed by the policy */
979 			if (is_mech_disabled(prov_desc,
980 			    prov_desc->pd_mechanisms[i].cm_mech_name))
981 				continue;
982 
983 			(void) strncpy(ec.ec_mech_name,
984 			    prov_desc->pd_mechanisms[i].cm_mech_name,
985 			    CRYPTO_MAX_MECH_NAME);
986 			kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED, &ec);
987 		}
988 
989 	}
990 
991 	/*
992 	 * Inform interested clients about the new or departing provider.
993 	 * In case of a logical provider, we need to notify the event only
994 	 * for the logical provider and not for the underlying
995 	 * providers which are known by the KCF_LPROV_MEMBER bit.
996 	 */
997 	if (prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER ||
998 	    (prov_desc->pd_flags & KCF_LPROV_MEMBER) == 0) {
999 		kcf_walk_ntfylist(is_added ? CRYPTO_EVENT_PROVIDER_REGISTERED :
1000 		    CRYPTO_EVENT_PROVIDER_UNREGISTERED, prov_desc);
1001 	}
1002 }
1003 
1004 static void
1005 delete_kstat(kcf_provider_desc_t *desc)
1006 {
1007 	/* destroy the kstat created for this provider */
1008 	if (desc->pd_kstat != NULL) {
1009 		kcf_provider_desc_t *kspd = desc->pd_kstat->ks_private;
1010 
1011 		/* release reference held by desc->pd_kstat->ks_private */
1012 		ASSERT(desc == kspd);
1013 		kstat_delete(kspd->pd_kstat);
1014 		desc->pd_kstat = NULL;
1015 		KCF_PROV_REFRELE(kspd);
1016 		KCF_PROV_IREFRELE(kspd);
1017 	}
1018 }
1019