xref: /titanic_50/usr/src/uts/common/crypto/spi/kcf_spi.c (revision 948f2876ce2a3010558f4f6937e16086ebcd36f2)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * This file is part of the core Kernel Cryptographic Framework.
30  * It implements the SPI functions exported to cryptographic
31  * providers.
32  */
33 
34 #include <sys/ksynch.h>
35 #include <sys/cmn_err.h>
36 #include <sys/ddi.h>
37 #include <sys/sunddi.h>
38 #include <sys/modctl.h>
39 #include <sys/crypto/common.h>
40 #include <sys/crypto/impl.h>
41 #include <sys/crypto/sched_impl.h>
42 #include <sys/crypto/spi.h>
43 #include <sys/taskq.h>
44 #include <sys/disp.h>
45 #include <sys/kstat.h>
46 #include <sys/policy.h>
47 
48 /*
49  * minalloc and maxalloc values to be used for taskq_create().
50  */
51 int crypto_taskq_minalloc = CYRPTO_TASKQ_MIN;
52 int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX;
53 
54 static void free_provider_list(kcf_provider_list_t *);
55 static void remove_provider(kcf_provider_desc_t *);
56 static void process_logical_providers(crypto_provider_info_t *,
57     kcf_provider_desc_t *);
58 static int init_prov_mechs(crypto_provider_info_t *, kcf_provider_desc_t *);
59 static int kcf_prov_kstat_update(kstat_t *, int);
60 static void undo_register_provider_extra(kcf_provider_desc_t *);
61 static void delete_kstat(kcf_provider_desc_t *);
62 
63 static kcf_prov_stats_t kcf_stats_ks_data_template = {
64 	{ "kcf_ops_total",		KSTAT_DATA_UINT64 },
65 	{ "kcf_ops_passed",		KSTAT_DATA_UINT64 },
66 	{ "kcf_ops_failed",		KSTAT_DATA_UINT64 },
67 	{ "kcf_ops_returned_busy",	KSTAT_DATA_UINT64 }
68 };
69 
70 #define	KCF_SPI_COPY_OPS(src, dst, ops) if ((src)->ops != NULL) \
71 	*((dst)->ops) = *((src)->ops);
72 
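/*
 * For example, KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops)
 * expands to
 *
 *	if ((src_ops)->co_digest_ops != NULL)
 *		*((dst_ops)->co_digest_ops) = *((src_ops)->co_digest_ops);
 *
 * i.e. the structure the destination member points to is overwritten
 * only if the provider supplied that group of entry points.
 */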
73 /*
74  * Copy an ops vector from src to dst. Used during provider registration
75  * to copy the ops vector from the provider info structure to the
76  * provider descriptor maintained by KCF.
77  * Copying the ops vector specified by the provider is needed since the
78  * framework does not require the provider info structure to be
79  * persistent.
80  */
81 static void
82 copy_ops_vector_v1(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
83 {
84 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_control_ops);
85 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
86 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_cipher_ops);
87 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mac_ops);
88 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_sign_ops);
89 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_verify_ops);
90 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_ops);
91 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_cipher_mac_ops);
92 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_random_ops);
93 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_session_ops);
94 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_object_ops);
95 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_key_ops);
96 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_provider_ops);
97 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_ctx_ops);
98 }
99 
100 static void
101 copy_ops_vector_v2(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
102 {
103 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops);
104 }
105 
106 static void
107 copy_ops_vector_v3(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
108 {
109 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_nostore_key_ops);
110 }
111 
112 /*
113  * This routine is used to add cryptographic providers to the KCF framework.
114  * Providers pass a crypto_provider_info structure to crypto_register_provider()
115  * and get back a handle.  The crypto_provider_info structure contains a
116  * list of mechanisms supported by the provider and an ops vector containing
117  * provider entry points.  Hardware providers call this routine in their attach
118  * routines.  Software providers call this routine in their _init() routine.
119  */
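/*
 * Example (an illustrative sketch, not taken from any existing provider):
 * a software provider would typically register from its _init() routine
 * roughly as follows.  The names prefixed with "example_" are hypothetical,
 * and pi_mech_list_count is assumed from the SPI definition of
 * crypto_provider_info_t; the remaining fields and return codes are the
 * ones this routine consumes.
 *
 *	static crypto_kcf_provider_handle_t example_prov_handle;
 *	static crypto_provider_info_t example_prov_info;
 *
 *	int
 *	_init(void)
 *	{
 *		int ret;
 *
 *		if ((ret = mod_install(&example_modlinkage)) != 0)
 *			return (ret);
 *
 *		example_prov_info.pi_interface_version = CRYPTO_SPI_VERSION_1;
 *		example_prov_info.pi_provider_description =
 *		    "Example Software Provider";
 *		example_prov_info.pi_provider_type = CRYPTO_SW_PROVIDER;
 *		example_prov_info.pi_provider_dev.pd_sw = &example_modlinkage;
 *		example_prov_info.pi_ops_vector = &example_ops;
 *		example_prov_info.pi_mechanisms = example_mech_info_tab;
 *		example_prov_info.pi_mech_list_count =
 *		    sizeof (example_mech_info_tab) /
 *		    sizeof (crypto_mech_info_t);
 *
 *		if (crypto_register_provider(&example_prov_info,
 *		    &example_prov_handle) != CRYPTO_SUCCESS) {
 *			(void) mod_remove(&example_modlinkage);
 *			return (EACCES);
 *		}
 *		return (0);
 *	}
 */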
120 int
121 crypto_register_provider(crypto_provider_info_t *info,
122     crypto_kcf_provider_handle_t *handle)
123 {
124 	int need_verify;
125 	struct modctl *mcp;
126 	char *name;
127 	char ks_name[KSTAT_STRLEN];
128 
129 	kcf_provider_desc_t *prov_desc = NULL;
130 	int ret = CRYPTO_ARGUMENTS_BAD;
131 
132 	if (info->pi_interface_version > CRYPTO_SPI_VERSION_3)
133 		return (CRYPTO_VERSION_MISMATCH);
134 
135 	/*
136 	 * Check provider type, must be software, hardware, or logical.
137 	 */
138 	if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
139 	    info->pi_provider_type != CRYPTO_SW_PROVIDER &&
140 	    info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
141 		return (CRYPTO_ARGUMENTS_BAD);
142 
143 	/*
144 	 * Allocate and initialize a new provider descriptor. We also
145 	 * hold it and release it when done.
146 	 */
147 	prov_desc = kcf_alloc_provider_desc(info);
148 	KCF_PROV_REFHOLD(prov_desc);
149 
150 	prov_desc->pd_prov_type = info->pi_provider_type;
151 
152 	/* provider-private handle, opaque to KCF */
153 	prov_desc->pd_prov_handle = info->pi_provider_handle;
154 
155 	/* copy provider description string */
156 	if (info->pi_provider_description != NULL) {
157 		/*
158 		 * pi_provider_description is a string that can contain
159 		 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
160 		 * INCLUDING the terminating null character. A bcopy()
161 		 * is necessary here as pd_description should not have
162 		 * a null character. See comments in kcf_alloc_provider_desc()
163 		 * for details on pd_description field.
164 		 */
165 		bcopy(info->pi_provider_description, prov_desc->pd_description,
166 		    min(strlen(info->pi_provider_description),
167 		    CRYPTO_PROVIDER_DESCR_MAX_LEN));
168 	}
169 
170 	if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
171 		if (info->pi_ops_vector == NULL) {
172 			goto bail;
173 		}
174 		copy_ops_vector_v1(info->pi_ops_vector,
175 		    prov_desc->pd_ops_vector);
176 		if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
177 			copy_ops_vector_v2(info->pi_ops_vector,
178 			    prov_desc->pd_ops_vector);
179 			prov_desc->pd_flags = info->pi_flags;
180 		}
181 		if (info->pi_interface_version == CRYPTO_SPI_VERSION_3) {
182 			copy_ops_vector_v3(info->pi_ops_vector,
183 			    prov_desc->pd_ops_vector);
184 		}
185 	}
186 
187 	/* object_ops and nostore_key_ops are mutually exclusive */
188 	if (prov_desc->pd_ops_vector->co_object_ops &&
189 	    prov_desc->pd_ops_vector->co_nostore_key_ops) {
190 		goto bail;
191 	}
192 	/*
193 	 * For software providers, copy the module name and module ID.
194 	 * For hardware providers, copy the driver name and instance.
195 	 */
196 	switch (info->pi_provider_type) {
197 	case  CRYPTO_SW_PROVIDER:
198 		if (info->pi_provider_dev.pd_sw == NULL)
199 			goto bail;
200 
201 		if ((mcp = mod_getctl(info->pi_provider_dev.pd_sw)) == NULL)
202 			goto bail;
203 
204 		prov_desc->pd_module_id = mcp->mod_id;
205 		name = mcp->mod_modname;
206 		break;
207 
208 	case CRYPTO_HW_PROVIDER:
209 	case CRYPTO_LOGICAL_PROVIDER:
210 		if (info->pi_provider_dev.pd_hw == NULL)
211 			goto bail;
212 
213 		prov_desc->pd_instance =
214 		    ddi_get_instance(info->pi_provider_dev.pd_hw);
215 		name = (char *)ddi_driver_name(info->pi_provider_dev.pd_hw);
216 		break;
217 	}
218 	if (name == NULL)
219 		goto bail;
220 
221 	prov_desc->pd_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
222 	(void) strcpy(prov_desc->pd_name, name);
223 
224 	if ((prov_desc->pd_mctlp = kcf_get_modctl(info)) == NULL)
225 		goto bail;
226 
227 	/* process the mechanisms supported by the provider */
228 	if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
229 		goto bail;
230 
231 	/*
232 	 * Add the provider to the providers table. This also sets the
233 	 * pd_prov_id field in the descriptor.
234 	 */
235 	if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
236 		undo_register_provider(prov_desc, B_FALSE);
237 		goto bail;
238 	}
239 
240 	if ((need_verify = kcf_need_signature_verification(prov_desc)) == -1) {
241 		undo_register_provider(prov_desc, B_TRUE);
242 		ret = CRYPTO_MODVERIFICATION_FAILED;
243 		goto bail;
244 	}
245 
246 	/*
247 	 * We create a taskq only for a hardware provider. The global
248 	 * software queue is used for software providers. The taskq
249 	 * is limited to one thread (nthreads == 1), since that guarantees
250 	 * tasks are executed in the order they are scheduled. We pass
251 	 * the TASKQ_PREPOPULATE flag to keep some entries cached to
252 	 * improve performance.
253 	 */
254 	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
255 		prov_desc->pd_sched_info.ks_taskq = taskq_create("kcf_taskq",
256 		    1, minclsyspri, crypto_taskq_minalloc,
257 		    crypto_taskq_maxalloc, TASKQ_PREPOPULATE);
258 	else
259 		prov_desc->pd_sched_info.ks_taskq = NULL;
260 
261 	/* no kernel session to logical providers */
262 	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
263 		/*
264 		 * Open a session for session-oriented providers. This session
265 		 * is used for all kernel consumers. This is fine as a provider
266 		 * is required to support multiple thread access to a session.
267 		 * We can do this only after the taskq has been created as we
268 		 * do a kcf_submit_request() to open the session.
269 		 */
270 		if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
271 			kcf_req_params_t params;
272 
273 			KCF_WRAP_SESSION_OPS_PARAMS(&params,
274 			    KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
275 			    CRYPTO_USER, NULL, 0, prov_desc);
276 			ret = kcf_submit_request(prov_desc, NULL, NULL, &params,
277 			    B_FALSE);
278 
279 			if (ret != CRYPTO_SUCCESS) {
280 				undo_register_provider(prov_desc, B_TRUE);
281 				ret = CRYPTO_FAILED;
282 				goto bail;
283 			}
284 		}
285 	}
286 
287 	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
288 		/*
289 		 * Create the kstat for this provider. There is a kstat
290 		 * installed for each successfully registered provider.
291 		 * This kstat is deleted when the provider unregisters.
292 		 */
293 		if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
294 			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%s",
295 			    prov_desc->pd_name, "provider_stats");
296 		} else {
297 			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%d_%u_%s",
298 			    prov_desc->pd_name, prov_desc->pd_instance,
299 			    prov_desc->pd_prov_id, "provider_stats");
300 		}
301 
302 		prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
303 		    KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
304 		    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
305 
306 		if (prov_desc->pd_kstat != NULL) {
307 			bcopy(&kcf_stats_ks_data_template,
308 			    &prov_desc->pd_ks_data,
309 			    sizeof (kcf_stats_ks_data_template));
310 			prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
311 			KCF_PROV_REFHOLD(prov_desc);
312 			KCF_PROV_IREFHOLD(prov_desc);
313 			prov_desc->pd_kstat->ks_private = prov_desc;
314 			prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
315 			kstat_install(prov_desc->pd_kstat);
316 		}
317 	}
318 
319 	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
320 		process_logical_providers(info, prov_desc);
321 
322 	if (need_verify == 1) {
323 		/* kcf_verify_signature routine will release these holds */
324 		KCF_PROV_REFHOLD(prov_desc);
325 		KCF_PROV_IREFHOLD(prov_desc);
326 
327 		if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER) {
328 			/*
329 			 * It is not safe to make the door upcall to kcfd from
330 			 * this context since the kcfd thread could reenter
331 			 * devfs. So, we dispatch a taskq job to do the
332 			 * verification and return to the provider.
333 			 */
334 			(void) taskq_dispatch(system_taskq,
335 			    kcf_verify_signature, (void *)prov_desc, TQ_SLEEP);
336 		} else if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
337 			kcf_verify_signature(prov_desc);
338 			if (prov_desc->pd_state ==
339 			    KCF_PROV_VERIFICATION_FAILED) {
340 				undo_register_provider_extra(prov_desc);
341 				ret = CRYPTO_MODVERIFICATION_FAILED;
342 				goto bail;
343 			}
344 		}
345 	} else {
346 		mutex_enter(&prov_desc->pd_lock);
347 		prov_desc->pd_state = KCF_PROV_READY;
348 		mutex_exit(&prov_desc->pd_lock);
349 		kcf_do_notify(prov_desc, B_TRUE);
350 	}
351 
352 	*handle = prov_desc->pd_kcf_prov_handle;
353 	ret = CRYPTO_SUCCESS;
354 
355 bail:
356 	KCF_PROV_REFRELE(prov_desc);
357 	return (ret);
358 }
359 
360 /*
361  * This routine is used to notify the framework when a provider is being
362  * removed.  Hardware providers call this routine in their detach routines.
363  * Software providers call this routine in their _fini() routine.
364  */
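/*
 * Continuing the hypothetical software provider sketched above
 * crypto_register_provider(), its _fini() routine would look roughly
 * like this (example_prov_handle and example_modlinkage are the same
 * made-up names):
 *
 *	int
 *	_fini(void)
 *	{
 *		if (crypto_unregister_provider(example_prov_handle) !=
 *		    CRYPTO_SUCCESS)
 *			return (EBUSY);
 *
 *		return (mod_remove(&example_modlinkage));
 *	}
 *
 * The CRYPTO_BUSY return below maps naturally onto the EBUSY that
 * _fini() is expected to return while the provider is still in use.
 */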
365 int
366 crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
367 {
368 	uint_t mech_idx;
369 	kcf_provider_desc_t *desc;
370 	kcf_prov_state_t saved_state;
371 
372 	/* lookup provider descriptor */
373 	if ((desc = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
374 		return (CRYPTO_UNKNOWN_PROVIDER);
375 
376 	mutex_enter(&desc->pd_lock);
377 	/*
378 	 * Check if any other thread is disabling or removing
379 	 * this provider. We return if this is the case.
380 	 */
381 	if (desc->pd_state >= KCF_PROV_DISABLED) {
382 		mutex_exit(&desc->pd_lock);
383 		/* Release reference held by kcf_prov_tab_lookup(). */
384 		KCF_PROV_REFRELE(desc);
385 		return (CRYPTO_BUSY);
386 	}
387 
388 	saved_state = desc->pd_state;
389 	desc->pd_state = KCF_PROV_REMOVED;
390 
391 	if (saved_state == KCF_PROV_BUSY) {
392 		/*
393 		 * The per-provider taskq thread may be waiting. We
394 		 * signal it so that it can start failing requests.
395 		 * Note that we do not need a cv_broadcast() as we keep
396 		 * only a single thread per taskq.
397 		 */
398 		cv_signal(&desc->pd_resume_cv);
399 	}
400 
401 	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
402 		/*
403 		 * Check if this provider is currently being used.
404 		 * pd_irefcnt is the number of holds from the internal
405 		 * structures. We add one to account for the above lookup.
406 		 */
407 		if (desc->pd_refcnt > desc->pd_irefcnt + 1) {
408 			desc->pd_state = saved_state;
409 			mutex_exit(&desc->pd_lock);
410 			/* Release reference held by kcf_prov_tab_lookup(). */
411 			KCF_PROV_REFRELE(desc);
412 			/*
413 			 * When the administrator gets the busy return value,
414 			 * they will presumably stop the clients, removing the
415 			 * holds.  A subsequent retry will then succeed.
416 			 */
417 			return (CRYPTO_BUSY);
418 		}
419 	}
420 	mutex_exit(&desc->pd_lock);
421 
422 	if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
423 		remove_provider(desc);
424 	}
425 
426 	if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
427 		/* remove the provider from the mechanisms tables */
428 		for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
429 		    mech_idx++) {
430 			kcf_remove_mech_provider(
431 			    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
432 		}
433 	}
434 
435 	/* remove provider from providers table */
436 	if (kcf_prov_tab_rem_provider((crypto_provider_id_t)handle) !=
437 	    CRYPTO_SUCCESS) {
438 		/* Release reference held by kcf_prov_tab_lookup(). */
439 		KCF_PROV_REFRELE(desc);
440 		return (CRYPTO_UNKNOWN_PROVIDER);
441 	}
442 
443 	delete_kstat(desc);
444 
445 	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
446 		/* Release reference held by kcf_prov_tab_lookup(). */
447 		KCF_PROV_REFRELE(desc);
448 
449 		/*
450 		 * Wait till the existing requests complete.
451 		 */
452 		mutex_enter(&desc->pd_lock);
453 		while (desc->pd_state != KCF_PROV_FREED)
454 			cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
455 		mutex_exit(&desc->pd_lock);
456 	} else {
457 		/*
458 		 * Wait until requests that have been sent to the provider
459 		 * complete.
460 		 */
461 		mutex_enter(&desc->pd_lock);
462 		while (desc->pd_irefcnt > 0)
463 			cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
464 		mutex_exit(&desc->pd_lock);
465 	}
466 
467 	kcf_do_notify(desc, B_FALSE);
468 
469 	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
470 		/*
471 		 * This is the only place where kcf_free_provider_desc()
472 		 * is called directly. KCF_PROV_REFRELE() should free the
473 		 * structure in all other places.
474 		 */
475 		ASSERT(desc->pd_state == KCF_PROV_FREED &&
476 		    desc->pd_refcnt == 0);
477 		kcf_free_provider_desc(desc);
478 	} else {
479 		KCF_PROV_REFRELE(desc);
480 	}
481 
482 	return (CRYPTO_SUCCESS);
483 }
484 
485 /*
486  * This routine is used to notify the framework that the state of
487  * a cryptographic provider has changed. Valid state codes are:
488  *
489  * CRYPTO_PROVIDER_READY
490  * 	The provider indicates that it can process more requests. A provider
491  *	sends this notification if it has previously notified the framework
492  *	with a CRYPTO_PROVIDER_BUSY.
493  *
494  * CRYPTO_PROVIDER_BUSY
495  * 	The provider cannot accept more requests.
496  *
497  * CRYPTO_PROVIDER_FAILED
498  *	The provider encountered an internal error. The framework will not
499  * 	send any more requests to the provider. The provider may send a
500  *	CRYPTO_PROVIDER_READY notification if it recovers from the error.
501  *
502  * This routine can be called from user or interrupt context.
503  */
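/*
 * Illustrative sketch (the softc layout and example_queue_full() are
 * hypothetical): a hardware provider that runs out of command slots
 * could throttle the framework and later resume it with
 *
 *	if (example_queue_full(softc))
 *		crypto_provider_notification(softc->sc_prov_handle,
 *		    CRYPTO_PROVIDER_BUSY);
 *
 * and, once slots free up (e.g. from its interrupt handler),
 *
 *	crypto_provider_notification(softc->sc_prov_handle,
 *	    CRYPTO_PROVIDER_READY);
 */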
504 void
505 crypto_provider_notification(crypto_kcf_provider_handle_t handle, uint_t state)
506 {
507 	kcf_provider_desc_t *pd;
508 
509 	/* lookup the provider from the given handle */
510 	if ((pd = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
511 		return;
512 
513 	mutex_enter(&pd->pd_lock);
514 
515 	if (pd->pd_state <= KCF_PROV_VERIFICATION_FAILED)
516 		goto out;
517 
518 	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
519 		cmn_err(CE_WARN, "crypto_provider_notification: "
520 		    "logical provider (%x) ignored\n", handle);
521 		goto out;
522 	}
523 	switch (state) {
524 	case CRYPTO_PROVIDER_READY:
525 		switch (pd->pd_state) {
526 		case KCF_PROV_BUSY:
527 			pd->pd_state = KCF_PROV_READY;
528 			/*
529 			 * Signal the per-provider taskq thread that it
530 			 * can start submitting requests. Note that we do
531 			 * not need a cv_broadcast() as we keep only a
532 			 * single thread per taskq.
533 			 */
534 			cv_signal(&pd->pd_resume_cv);
535 			break;
536 
537 		case KCF_PROV_FAILED:
538 			/*
539 			 * The provider recovered from the error. Let us
540 			 * use it now.
541 			 */
542 			pd->pd_state = KCF_PROV_READY;
543 			break;
544 		}
545 		break;
546 
547 	case CRYPTO_PROVIDER_BUSY:
548 		switch (pd->pd_state) {
549 		case KCF_PROV_READY:
550 			pd->pd_state = KCF_PROV_BUSY;
551 			break;
552 		}
553 		break;
554 
555 	case CRYPTO_PROVIDER_FAILED:
556 		/*
557 		 * We note the failure and return. The per-provider taskq
558 		 * thread checks this flag and starts failing the
559 		 * requests if it is set. See process_req_hwp() for details.
560 		 */
561 		switch (pd->pd_state) {
562 		case KCF_PROV_READY:
563 			pd->pd_state = KCF_PROV_FAILED;
564 			break;
565 
566 		case KCF_PROV_BUSY:
567 			pd->pd_state = KCF_PROV_FAILED;
568 			/*
569 			 * The per-provider taskq thread may be waiting. We
570 			 * signal it so that it can start failing requests.
571 			 */
572 			cv_signal(&pd->pd_resume_cv);
573 			break;
574 		}
575 		break;
576 	}
577 out:
578 	mutex_exit(&pd->pd_lock);
579 	KCF_PROV_REFRELE(pd);
580 }
581 
582 /*
583  * This routine is used to notify the framework of the result of
584  * an asynchronous request handled by a provider. Valid error
585  * codes are the same as the CRYPTO_* errors defined in common.h.
586  *
587  * This routine can be called from user or interrupt context.
588  */
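/*
 * Illustrative sketch: a hardware provider that returned CRYPTO_QUEUED
 * from an entry point and stashed the request handle in its per-job
 * state (the "job" structure below is hypothetical) would complete the
 * request from its interrupt handler with
 *
 *	crypto_op_notification(job->j_req_handle,
 *	    job->j_hw_error ? CRYPTO_DEVICE_ERROR : CRYPTO_SUCCESS);
 */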
589 void
590 crypto_op_notification(crypto_req_handle_t handle, int error)
591 {
592 	kcf_call_type_t ctype;
593 
594 	if ((ctype = GET_REQ_TYPE(handle)) == CRYPTO_SYNCH) {
595 		kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)handle;
596 
597 		if (error != CRYPTO_SUCCESS)
598 			sreq->sn_provider->pd_sched_info.ks_nfails++;
599 		KCF_PROV_IREFRELE(sreq->sn_provider);
600 		kcf_sop_done(sreq, error);
601 	} else {
602 		kcf_areq_node_t *areq = (kcf_areq_node_t *)handle;
603 
604 		ASSERT(ctype == CRYPTO_ASYNCH);
605 		if (error != CRYPTO_SUCCESS)
606 			areq->an_provider->pd_sched_info.ks_nfails++;
607 		KCF_PROV_IREFRELE(areq->an_provider);
608 		kcf_aop_done(areq, error);
609 	}
610 }
611 
612 /*
613  * This routine is used by software providers to determine
614  * whether to use KM_SLEEP or KM_NOSLEEP during memory allocation.
615  * Note that hardware providers can always use KM_SLEEP. So,
616  * they do not need to call this routine.
617  *
618  * This routine can be called from user or interrupt context.
619  */
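/*
 * For instance, a software provider allocating per-request state in one
 * of its entry points would do something like (example_ctx_t is a
 * hypothetical type):
 *
 *	ctxp = kmem_zalloc(sizeof (example_ctx_t), crypto_kmflag(req));
 *	if (ctxp == NULL)
 *		return (CRYPTO_HOST_MEMORY);
 */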
620 int
621 crypto_kmflag(crypto_req_handle_t handle)
622 {
623 	return (REQHNDL2_KMFLAG(handle));
624 }
625 
626 /*
627  * Process the mechanism info structures specified by the provider
628  * during registration. A NULL crypto_provider_info_t indicates
629  * an already initialized provider descriptor.
630  *
631  * Mechanisms are not added to the kernel's mechanism table if the
632  * provider is a logical provider.
633  *
634  * Returns CRYPTO_SUCCESS on success, CRYPTO_ARGUMENTS_BAD if one
635  * of the specified mechanisms was malformed, or CRYPTO_HOST_MEMORY
636  * if the table of mechanisms is full.
637  */
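/*
 * As an illustration of the checks below, a valid mechanism table for a
 * hypothetical digest-only software provider could look like
 *
 *	static crypto_mech_info_t example_mech_info_tab[] = {
 *		{ .cm_mech_name = "CKM_SHA_1",
 *		  .cm_func_group_mask = CRYPTO_FG_DIGEST,
 *		  .cm_mech_flags = CRYPTO_KEYSIZE_UNIT_IN_BYTES },
 *	};
 *
 * Setting both CRYPTO_KEYSIZE_UNIT_IN_BITS and
 * CRYPTO_KEYSIZE_UNIT_IN_BYTES on an entry, or declaring
 * CRYPTO_HASH_NO_UPDATE in pi_flags without a non-zero
 * cm_max_input_length on the digest mechanisms, would make
 * registration fail with CRYPTO_ARGUMENTS_BAD.
 */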
638 static int
639 init_prov_mechs(crypto_provider_info_t *info, kcf_provider_desc_t *desc)
640 {
641 	uint_t mech_idx;
642 	uint_t cleanup_idx;
643 	int err = CRYPTO_SUCCESS;
644 	kcf_prov_mech_desc_t *pmd;
645 	int desc_use_count = 0;
646 	int mcount = desc->pd_mech_list_count;
647 
648 	if (desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
649 		if (info != NULL) {
650 			ASSERT(info->pi_mechanisms != NULL);
651 			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
652 			    sizeof (crypto_mech_info_t) * mcount);
653 		}
654 		return (CRYPTO_SUCCESS);
655 	}
656 
657 	/*
658 	 * Copy the mechanism list from the provider info to the provider
659 	 * descriptor. desc->pd_mechanisms has an extra crypto_mech_info_t
660 	 * element if the provider has random_ops since we keep an internal
661 	 * mechanism, SUN_RANDOM, in this case.
662 	 */
663 	if (info != NULL) {
664 		if (info->pi_ops_vector->co_random_ops != NULL) {
665 			crypto_mech_info_t *rand_mi;
666 
667 			/*
668 			 * Need the following check as it is possible to have
669 			 * a provider that implements just random_ops and has
670 			 * pi_mechanisms == NULL.
671 			 */
672 			if (info->pi_mechanisms != NULL) {
673 				bcopy(info->pi_mechanisms, desc->pd_mechanisms,
674 				    sizeof (crypto_mech_info_t) * (mcount - 1));
675 			}
676 			rand_mi = &desc->pd_mechanisms[mcount - 1];
677 
678 			bzero(rand_mi, sizeof (crypto_mech_info_t));
679 			(void) strncpy(rand_mi->cm_mech_name, SUN_RANDOM,
680 			    CRYPTO_MAX_MECH_NAME);
681 			rand_mi->cm_func_group_mask = CRYPTO_FG_RANDOM;
682 		} else {
683 			ASSERT(info->pi_mechanisms != NULL);
684 			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
685 			    sizeof (crypto_mech_info_t) * mcount);
686 		}
687 	}
688 
689 	/*
690 	 * For each mechanism supported by the provider, add the provider
691 	 * to the corresponding KCF mechanism mech_entry chain.
692 	 */
693 	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count; mech_idx++) {
694 		crypto_mech_info_t *mi = &desc->pd_mechanisms[mech_idx];
695 
696 		if ((mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BITS) &&
697 		    (mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)) {
698 			err = CRYPTO_ARGUMENTS_BAD;
699 			break;
700 		}
701 
702 		if (desc->pd_flags & CRYPTO_HASH_NO_UPDATE &&
703 		    mi->cm_func_group_mask & CRYPTO_FG_DIGEST) {
704 			/*
705 			 * We ask the provider to specify the limit
706 			 * per hash mechanism. But, in practice, a
707 			 * hardware limitation means all hash mechanisms
708 			 * will have the same maximum size allowed for
709 			 * input data. So, we make it a per provider
710 			 * limit to keep it simple.
711 			 */
712 			if (mi->cm_max_input_length == 0) {
713 				err = CRYPTO_ARGUMENTS_BAD;
714 				break;
715 			} else {
716 				desc->pd_hash_limit = mi->cm_max_input_length;
717 			}
718 		}
719 
720 		if (kcf_add_mech_provider(mech_idx, desc, &pmd) != KCF_SUCCESS)
721 			break;
722 
723 		if (pmd == NULL)
724 			continue;
725 
726 		/* The provider will be used for this mechanism */
727 		desc_use_count++;
728 	}
729 
730 	/*
731 	 * Don't allow multiple software providers with disabled mechanisms
732 	 * to register. Subsequent enabling of mechanisms will result in
733 	 * an unsupported configuration, i.e. multiple software providers
734 	 * per mechanism.
735 	 */
736 	if (desc_use_count == 0 && desc->pd_prov_type == CRYPTO_SW_PROVIDER)
737 		return (CRYPTO_ARGUMENTS_BAD);
738 
739 	if (err == KCF_SUCCESS)
740 		return (CRYPTO_SUCCESS);
741 
742 	/*
743 	 * An error occurred while adding a mechanism; clean up
744 	 * and bail.
745 	 */
746 	for (cleanup_idx = 0; cleanup_idx < mech_idx; cleanup_idx++) {
747 		kcf_remove_mech_provider(
748 		    desc->pd_mechanisms[cleanup_idx].cm_mech_name, desc);
749 	}
750 
751 	if (err == KCF_MECH_TAB_FULL)
752 		return (CRYPTO_HOST_MEMORY);
753 
754 	return (CRYPTO_ARGUMENTS_BAD);
755 }
756 
757 /*
758  * Update routine for the provider kstat. Only privileged users are
759  * allowed to access this information, since it is sensitive:
760  * some cryptographic attacks (e.g. traffic analysis) can make
761  * use of it.
762  */
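/*
 * For a software provider whose pd_name is, say, "example" (a
 * hypothetical name), crypto_register_provider() above creates the
 * named kstat kcf:0:example_provider_stats, whose statistics are the
 * ones in kcf_stats_ks_data_template (kcf_ops_total, kcf_ops_passed,
 * kcf_ops_failed, kcf_ops_returned_busy).  It can be read with, e.g.,
 * kstat(1M):
 *
 *	# kstat kcf:0:example_provider_stats
 *
 * Unprivileged readers see all-zero values because of the
 * secpolicy_sys_config() check below.
 */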
763 static int
764 kcf_prov_kstat_update(kstat_t *ksp, int rw)
765 {
766 	kcf_prov_stats_t *ks_data;
767 	kcf_provider_desc_t *pd = (kcf_provider_desc_t *)ksp->ks_private;
768 
769 	if (rw == KSTAT_WRITE)
770 		return (EACCES);
771 
772 	ks_data = ksp->ks_data;
773 
774 	if (secpolicy_sys_config(CRED(), B_TRUE) != 0) {
775 		ks_data->ps_ops_total.value.ui64 = 0;
776 		ks_data->ps_ops_passed.value.ui64 = 0;
777 		ks_data->ps_ops_failed.value.ui64 = 0;
778 		ks_data->ps_ops_busy_rval.value.ui64 = 0;
779 	} else {
780 		ks_data->ps_ops_total.value.ui64 =
781 		    pd->pd_sched_info.ks_ndispatches;
782 		ks_data->ps_ops_failed.value.ui64 =
783 		    pd->pd_sched_info.ks_nfails;
784 		ks_data->ps_ops_busy_rval.value.ui64 =
785 		    pd->pd_sched_info.ks_nbusy_rval;
786 		ks_data->ps_ops_passed.value.ui64 =
787 		    pd->pd_sched_info.ks_ndispatches -
788 		    pd->pd_sched_info.ks_nfails -
789 		    pd->pd_sched_info.ks_nbusy_rval;
790 	}
791 
792 	return (0);
793 }
794 
795 
796 /*
797  * Utility routine called from failure paths in crypto_register_provider()
798  * and from crypto_load_soft_disabled().
799  */
800 void
801 undo_register_provider(kcf_provider_desc_t *desc, boolean_t remove_prov)
802 {
803 	uint_t mech_idx;
804 
805 	/* remove the provider from the mechanisms tables */
806 	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
807 	    mech_idx++) {
808 		kcf_remove_mech_provider(
809 		    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
810 	}
811 
812 	/* remove provider from providers table */
813 	if (remove_prov)
814 		(void) kcf_prov_tab_rem_provider(desc->pd_prov_id);
815 }
816 
817 static void
818 undo_register_provider_extra(kcf_provider_desc_t *desc)
819 {
820 	delete_kstat(desc);
821 	undo_register_provider(desc, B_TRUE);
822 }
823 
824 /*
825  * Utility routine called from crypto_load_soft_disabled(). Callers
826  * should have done a prior undo_register_provider().
827  */
828 void
829 redo_register_provider(kcf_provider_desc_t *pd)
830 {
831 	/* process the mechanisms supported by the provider */
832 	(void) init_prov_mechs(NULL, pd);
833 
834 	/*
835 	 * Hold the provider in the providers table. We should not call
836 	 * kcf_prov_tab_add_provider() here as the provider descriptor
837 	 * is still valid, which means it already has an entry in the
838 	 * provider table.
839 	 */
840 	KCF_PROV_REFHOLD(pd);
841 	KCF_PROV_IREFHOLD(pd);
842 }
843 
844 /*
845  * Add provider (p1) to another provider's array of providers (p2).
846  * Hardware and logical providers use this array to cross-reference
847  * each other.
848  */
849 static void
850 add_provider_to_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
851 {
852 	kcf_provider_list_t *new;
853 
854 	new = kmem_alloc(sizeof (kcf_provider_list_t), KM_SLEEP);
855 	mutex_enter(&p2->pd_lock);
856 	new->pl_next = p2->pd_provider_list;
857 	p2->pd_provider_list = new;
858 	KCF_PROV_IREFHOLD(p1);
859 	new->pl_provider = p1;
860 	mutex_exit(&p2->pd_lock);
861 }
862 
863 /*
864  * Remove provider (p1) from another provider's array of providers (p2).
865  * Hardware and logical providers use this array to cross-reference
866  * each other.
867  */
868 static void
869 remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
870 {
871 
872 	kcf_provider_list_t *pl = NULL, **prev;
873 
874 	mutex_enter(&p2->pd_lock);
875 	for (pl = p2->pd_provider_list, prev = &p2->pd_provider_list;
876 	    pl != NULL; prev = &pl->pl_next, pl = pl->pl_next) {
877 		if (pl->pl_provider == p1) {
878 			break;
879 		}
880 	}
881 
882 	if (pl == NULL) {
883 		mutex_exit(&p2->pd_lock);
884 		return;
885 	}
886 
887 	/* detach and free kcf_provider_list structure */
888 	KCF_PROV_IREFRELE(p1);
889 	*prev = pl->pl_next;
890 	kmem_free(pl, sizeof (*pl));
891 	mutex_exit(&p2->pd_lock);
892 }
893 
894 /*
895  * Convert an array of logical provider handles (crypto_provider_id)
896  * stored in a crypto_provider_info structure into an array of provider
897  * descriptors (kcf_provider_desc_t) attached to a logical provider.
898  */
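/*
 * Illustrative sketch (variable names are hypothetical, and
 * pi_logical_providers is assumed to be a pointer to an array of
 * handles, as its use below suggests): a hardware provider that is a
 * member of one logical provider would reference it from its
 * registration info, using the handle the logical provider obtained
 * from its own crypto_register_provider() call:
 *
 *	static crypto_kcf_provider_handle_t example_logical_handles[1];
 *
 *	example_logical_handles[0] = example_logical_handle;
 *	example_prov_info.pi_logical_provider_count = 1;
 *	example_prov_info.pi_logical_providers = example_logical_handles;
 */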
899 static void
900 process_logical_providers(crypto_provider_info_t *info, kcf_provider_desc_t *hp)
901 {
902 	kcf_provider_desc_t *lp;
903 	crypto_provider_id_t handle;
904 	int count = info->pi_logical_provider_count;
905 	int i;
906 
907 	/* add hardware provider to each logical provider */
908 	for (i = 0; i < count; i++) {
909 		handle = info->pi_logical_providers[i];
910 		lp = kcf_prov_tab_lookup((crypto_provider_id_t)handle);
911 		if (lp == NULL) {
912 			continue;
913 		}
914 		add_provider_to_array(hp, lp);
915 		hp->pd_flags |= KCF_LPROV_MEMBER;
916 
917 		/*
918 		 * A hardware provider has to have the provider descriptor of
919 		 * every logical provider it belongs to, so it can be removed
920 		 * from the logical provider if the hardware provider
921 		 * unregisters from the framework.
922 		 */
923 		add_provider_to_array(lp, hp);
924 		KCF_PROV_REFRELE(lp);
925 	}
926 }
927 
928 /*
929  * This routine removes a provider from all of the logical or
930  * hardware providers it belongs to, and frees the provider's
931  * array of pointers to providers.
932  */
933 static void
934 remove_provider(kcf_provider_desc_t *pp)
935 {
936 	kcf_provider_desc_t *p;
937 	kcf_provider_list_t *e, *next;
938 
939 	mutex_enter(&pp->pd_lock);
940 	for (e = pp->pd_provider_list; e != NULL; e = next) {
941 		p = e->pl_provider;
942 		remove_provider_from_array(pp, p);
943 		if (p->pd_prov_type == CRYPTO_HW_PROVIDER &&
944 		    p->pd_provider_list == NULL)
945 			p->pd_flags &= ~KCF_LPROV_MEMBER;
946 		KCF_PROV_IREFRELE(p);
947 		next = e->pl_next;
948 		kmem_free(e, sizeof (*e));
949 	}
950 	pp->pd_provider_list = NULL;
951 	mutex_exit(&pp->pd_lock);
952 }
953 
954 /*
955  * Dispatch events as needed for a provider. The is_added flag tells
956  * whether the provider is registering or unregistering.
957  */
958 void
959 kcf_do_notify(kcf_provider_desc_t *prov_desc, boolean_t is_added)
960 {
961 	int i;
962 	crypto_notify_event_change_t ec;
963 
964 	ASSERT(prov_desc->pd_state > KCF_PROV_VERIFICATION_FAILED);
965 
966 	/*
967 	 * Inform interested clients of the mechanisms becoming
968 	 * available/unavailable. We skip this for logical providers
969 	 * as they do not affect mechanisms.
970 	 */
971 	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
972 		ec.ec_provider_type = prov_desc->pd_prov_type;
973 		ec.ec_change = is_added ? CRYPTO_MECH_ADDED :
974 		    CRYPTO_MECH_REMOVED;
975 		for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
976 			/* Skip any mechanisms not allowed by the policy */
977 			if (is_mech_disabled(prov_desc,
978 			    prov_desc->pd_mechanisms[i].cm_mech_name))
979 				continue;
980 
981 			(void) strncpy(ec.ec_mech_name,
982 			    prov_desc->pd_mechanisms[i].cm_mech_name,
983 			    CRYPTO_MAX_MECH_NAME);
984 			kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED, &ec);
985 		}
986 
987 	}
988 
989 	/*
990 	 * Inform interested clients about the new or departing provider.
991 	 * In the case of a logical provider, we send the notification only
992 	 * for the logical provider and not for the underlying providers,
993 	 * which are identified by the KCF_LPROV_MEMBER bit.
994 	 */
995 	if (prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER ||
996 	    (prov_desc->pd_flags & KCF_LPROV_MEMBER) == 0) {
997 		kcf_walk_ntfylist(is_added ? CRYPTO_EVENT_PROVIDER_REGISTERED :
998 		    CRYPTO_EVENT_PROVIDER_UNREGISTERED, prov_desc);
999 	}
1000 }
1001 
1002 static void
1003 delete_kstat(kcf_provider_desc_t *desc)
1004 {
1005 	/* destroy the kstat created for this provider */
1006 	if (desc->pd_kstat != NULL) {
1007 		kcf_provider_desc_t *kspd = desc->pd_kstat->ks_private;
1008 
1009 		/* release reference held by desc->pd_kstat->ks_private */
1010 		ASSERT(desc == kspd);
1011 		kstat_delete(kspd->pd_kstat);
1012 		desc->pd_kstat = NULL;
1013 		KCF_PROV_REFRELE(kspd);
1014 		KCF_PROV_IREFRELE(kspd);
1015 	}
1016 }
1017