1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25 /*
26 * Copyright 2010 Nexenta Systems, Inc. All rights reserved.
27 */
28
29 /*
30 * This file is part of the core Kernel Cryptographic Framework.
31 * It implements the SPI functions exported to cryptographic
32 * providers.
33 */
34
35 #include <sys/ksynch.h>
36 #include <sys/cmn_err.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/modctl.h>
40 #include <sys/crypto/common.h>
41 #include <sys/crypto/impl.h>
42 #include <sys/crypto/sched_impl.h>
43 #include <sys/crypto/spi.h>
44 #include <sys/crypto/ioctladmin.h>
45 #include <sys/taskq.h>
46 #include <sys/disp.h>
47 #include <sys/kstat.h>
48 #include <sys/policy.h>
49 #include <sys/cpuvar.h>
50
51 /*
52  * Tunables passed to taskq_create(): thread count, minalloc and maxalloc.
53 */
54 int crypto_taskq_threads = CRYPTO_TASKQ_THREADS;
55 int crypto_taskq_minalloc = CRYPTO_TASKQ_MIN;
56 int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX;
57
58 static void remove_provider(kcf_provider_desc_t *);
59 static void process_logical_providers(crypto_provider_info_t *,
60 kcf_provider_desc_t *);
61 static int init_prov_mechs(crypto_provider_info_t *, kcf_provider_desc_t *);
62 static int kcf_prov_kstat_update(kstat_t *, int);
63 static void delete_kstat(kcf_provider_desc_t *);
64
65 static kcf_prov_stats_t kcf_stats_ks_data_template = {
66 { "kcf_ops_total", KSTAT_DATA_UINT64 },
67 { "kcf_ops_passed", KSTAT_DATA_UINT64 },
68 { "kcf_ops_failed", KSTAT_DATA_UINT64 },
69 { "kcf_ops_returned_busy", KSTAT_DATA_UINT64 }
70 };
71
72 #define KCF_SPI_COPY_OPS(src, dst, ops) if ((src)->ops != NULL) \
73 *((dst)->ops) = *((src)->ops);
74
75 extern int sys_shutdown;
76
77 /*
78 * Copy an ops vector from src to dst. Used during provider registration
79 * to copy the ops vector from the provider info structure to the
80 * provider descriptor maintained by KCF.
81 * Copying the ops vector specified by the provider is needed since the
82 * framework does not require the provider info structure to be
83 * persistent.
84 */
85 static void
86 copy_ops_vector_v1(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
87 {
88 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_control_ops);
89 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
90 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_cipher_ops);
91 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mac_ops);
92 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_sign_ops);
93 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_verify_ops);
94 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_ops);
95 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_cipher_mac_ops);
96 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_random_ops);
97 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_session_ops);
98 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_object_ops);
99 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_key_ops);
100 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_provider_ops);
101 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_ctx_ops);
102 }
103
104 static void
105 copy_ops_vector_v2(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
106 {
107 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops);
108 }
109
110 static void
111 copy_ops_vector_v3(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
112 {
113 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_nostore_key_ops);
114 }
115
116 static void
117 copy_ops_vector_v4(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
118 {
119 KCF_SPI_COPY_OPS(src_ops, dst_ops, co_fips140_ops);
120 }
121
122 /*
123  * This routine is used to add cryptographic providers to the KCF framework.
124 * Providers pass a crypto_provider_info structure to crypto_register_provider()
125 * and get back a handle. The crypto_provider_info structure contains a
126 * list of mechanisms supported by the provider and an ops vector containing
127 * provider entry points. Hardware providers call this routine in their attach
128 * routines. Software providers call this routine in their _init() routine.
129 */
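/*
 * Illustrative sketch (not part of this file): a software provider would
 * typically fill in a crypto_provider_info_t and register from its _init()
 * routine, roughly as below. The "example_" names, the ops vector and the
 * mechanism table are hypothetical; only the KCF calls and the documented
 * crypto_provider_info_t fields are assumed here.
 *
 *	static crypto_provider_info_t example_prov_info;
 *	static crypto_kcf_provider_handle_t example_prov_handle;
 *
 *	int
 *	_init(void)
 *	{
 *		int ret;
 *
 *		example_prov_info.pi_interface_version = CRYPTO_SPI_VERSION_1;
 *		example_prov_info.pi_provider_description = "example provider";
 *		example_prov_info.pi_provider_type = CRYPTO_SW_PROVIDER;
 *		example_prov_info.pi_provider_dev.pd_sw = &modlinkage;
 *		example_prov_info.pi_ops_vector = &example_crypto_ops;
 *		example_prov_info.pi_mech_list_count = EXAMPLE_MECH_COUNT;
 *		example_prov_info.pi_mechanisms = example_mech_info_tab;
 *
 *		if ((ret = mod_install(&modlinkage)) != 0)
 *			return (ret);
 *
 *		if (crypto_register_provider(&example_prov_info,
 *		    &example_prov_handle) != CRYPTO_SUCCESS) {
 *			(void) mod_remove(&modlinkage);
 *			return (EACCES);
 *		}
 *		return (0);
 *	}
 */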
130 int
131 crypto_register_provider(crypto_provider_info_t *info,
132 crypto_kcf_provider_handle_t *handle)
133 {
134 struct modctl *mcp;
135 char *name;
136 char ks_name[KSTAT_STRLEN];
137 kcf_provider_desc_t *prov_desc = NULL;
138 int ret = CRYPTO_ARGUMENTS_BAD;
139
140 if (info->pi_interface_version > CRYPTO_SPI_VERSION_4) {
141 ret = CRYPTO_VERSION_MISMATCH;
142 goto errormsg;
143 }
144
145 /*
146 * Check provider type, must be software, hardware, or logical.
147 */
148 if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
149 info->pi_provider_type != CRYPTO_SW_PROVIDER &&
150 info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
151 goto errormsg;
152
153 /*
154 * Allocate and initialize a new provider descriptor. We also
155 * hold it and release it when done.
156 */
157 prov_desc = kcf_alloc_provider_desc(info);
158 KCF_PROV_REFHOLD(prov_desc);
159
160 prov_desc->pd_prov_type = info->pi_provider_type;
161
162 /* provider-private handle, opaque to KCF */
163 prov_desc->pd_prov_handle = info->pi_provider_handle;
164
165 /* copy provider description string */
166 if (info->pi_provider_description != NULL) {
167 /*
168 		 * pi_provider_description is a string that can contain
169 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
170 * INCLUDING the terminating null character. A bcopy()
171 * is necessary here as pd_description should not have
172 * a null character. See comments in kcf_alloc_provider_desc()
173 * for details on pd_description field.
174 */
175 bcopy(info->pi_provider_description, prov_desc->pd_description,
176 min(strlen(info->pi_provider_description),
177 CRYPTO_PROVIDER_DESCR_MAX_LEN));
178 }
179
180 if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
181 if (info->pi_ops_vector == NULL) {
182 goto bail;
183 }
184 copy_ops_vector_v1(info->pi_ops_vector,
185 prov_desc->pd_ops_vector);
186 if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
187 copy_ops_vector_v2(info->pi_ops_vector,
188 prov_desc->pd_ops_vector);
189 prov_desc->pd_flags = info->pi_flags;
190 }
191 if (info->pi_interface_version >= CRYPTO_SPI_VERSION_3) {
192 copy_ops_vector_v3(info->pi_ops_vector,
193 prov_desc->pd_ops_vector);
194 }
195 if (info->pi_interface_version == CRYPTO_SPI_VERSION_4) {
196 copy_ops_vector_v4(info->pi_ops_vector,
197 prov_desc->pd_ops_vector);
198 }
199 }
200
201 /* object_ops and nostore_key_ops are mutually exclusive */
202 if (prov_desc->pd_ops_vector->co_object_ops &&
203 prov_desc->pd_ops_vector->co_nostore_key_ops) {
204 goto bail;
205 }
206 /*
207 * For software providers, copy the module name and module ID.
208 * For hardware providers, copy the driver name and instance.
209 */
210 switch (info->pi_provider_type) {
211 case CRYPTO_SW_PROVIDER:
212 if (info->pi_provider_dev.pd_sw == NULL)
213 goto bail;
214
215 if ((mcp = mod_getctl(info->pi_provider_dev.pd_sw)) == NULL)
216 goto bail;
217
218 prov_desc->pd_module_id = mcp->mod_id;
219 name = mcp->mod_modname;
220 break;
221
222 case CRYPTO_HW_PROVIDER:
223 case CRYPTO_LOGICAL_PROVIDER:
224 if (info->pi_provider_dev.pd_hw == NULL)
225 goto bail;
226
227 prov_desc->pd_instance =
228 ddi_get_instance(info->pi_provider_dev.pd_hw);
229 name = (char *)ddi_driver_name(info->pi_provider_dev.pd_hw);
230 break;
231 }
232 if (name == NULL)
233 goto bail;
234
235 prov_desc->pd_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
236 (void) strcpy(prov_desc->pd_name, name);
237
238 if ((prov_desc->pd_mctlp = kcf_get_modctl(info)) == NULL)
239 goto bail;
240
241 /* process the mechanisms supported by the provider */
242 if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
243 goto bail;
244
245 /*
246 	 * Add the provider to the providers table; this also sets the
247 	 * descriptor's pd_prov_id field.
248 */
249 if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
250 undo_register_provider(prov_desc, B_FALSE);
251 goto bail;
252 }
253
254 /*
255 * We create a taskq only for a hardware provider. The global
256 * software queue is used for software providers. We handle ordering
257 * of multi-part requests in the taskq routine. So, it is safe to
258 * have multiple threads for the taskq. We pass TASKQ_PREPOPULATE flag
259 * to keep some entries cached to improve performance.
260 */
261 if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
262 prov_desc->pd_taskq = taskq_create("kcf_taskq",
263 crypto_taskq_threads, minclsyspri,
264 crypto_taskq_minalloc, crypto_taskq_maxalloc,
265 TASKQ_PREPOPULATE);
266 else
267 prov_desc->pd_taskq = NULL;
268
269 	/* Logical providers get no kernel session and have no pd_flags. */
270 if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
271 /*
272 * Open a session for session-oriented providers. This session
273 * is used for all kernel consumers. This is fine as a provider
274 * is required to support multiple thread access to a session.
275 * We can do this only after the taskq has been created as we
276 * do a kcf_submit_request() to open the session.
277 */
278 if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
279 kcf_req_params_t params;
280
281 			KCF_WRAP_SESSION_OPS_PARAMS(&params,
282 KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
283 CRYPTO_USER, NULL, 0, prov_desc);
284 			ret = kcf_submit_request(prov_desc, NULL, NULL, &params,
285 B_FALSE);
286 if (ret != CRYPTO_SUCCESS)
287 goto undo_then_bail;
288 }
289
290 /*
291 * Get the value for the maximum input length allowed if
292 		 * CRYPTO_HASH_NO_UPDATE or CRYPTO_HMAC_NO_UPDATE is specified.
293 */
294 if (prov_desc->pd_flags &
295 (CRYPTO_HASH_NO_UPDATE | CRYPTO_HMAC_NO_UPDATE)) {
296 kcf_req_params_t params;
297 crypto_provider_ext_info_t ext_info;
298
299 if (KCF_PROV_PROVMGMT_OPS(prov_desc) == NULL)
300 goto undo_then_bail;
301
302 bzero(&ext_info, sizeof (ext_info));
303 			KCF_WRAP_PROVMGMT_OPS_PARAMS(&params,
304 KCF_OP_MGMT_EXTINFO,
305 0, NULL, 0, NULL, 0, NULL, &ext_info, prov_desc);
306 ret = kcf_submit_request(prov_desc, NULL, NULL,
307 			    &params, B_FALSE);
308 if (ret != CRYPTO_SUCCESS)
309 goto undo_then_bail;
310
311 if (prov_desc->pd_flags & CRYPTO_HASH_NO_UPDATE) {
312 prov_desc->pd_hash_limit =
313 ext_info.ei_hash_max_input_len;
314 }
315 if (prov_desc->pd_flags & CRYPTO_HMAC_NO_UPDATE) {
316 prov_desc->pd_hmac_limit =
317 ext_info.ei_hmac_max_input_len;
318 }
319 }
320 }
321
322 if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
323 /*
324 * Create the kstat for this provider. There is a kstat
325 * installed for each successfully registered provider.
326 		 * This kstat is deleted when the provider unregisters.
327 */
328 if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
329 (void) snprintf(ks_name, KSTAT_STRLEN, "%s_%s",
330 prov_desc->pd_name, "provider_stats");
331 } else {
332 (void) snprintf(ks_name, KSTAT_STRLEN, "%s_%d_%u_%s",
333 prov_desc->pd_name, prov_desc->pd_instance,
334 prov_desc->pd_prov_id, "provider_stats");
335 }
336
337 prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
338 KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
339 sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
340
341 if (prov_desc->pd_kstat != NULL) {
342 bcopy(&kcf_stats_ks_data_template,
343 &prov_desc->pd_ks_data,
344 sizeof (kcf_stats_ks_data_template));
345 prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
346 KCF_PROV_REFHOLD(prov_desc);
347 prov_desc->pd_kstat->ks_private = prov_desc;
348 prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
349 kstat_install(prov_desc->pd_kstat);
350 }
351 }
352
353 if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
354 process_logical_providers(info, prov_desc);
355
356 mutex_enter(&prov_desc->pd_lock);
357 prov_desc->pd_state = KCF_PROV_READY;
358 mutex_exit(&prov_desc->pd_lock);
359 kcf_do_notify(prov_desc, B_TRUE);
360
361 *handle = prov_desc->pd_kcf_prov_handle;
362 KCF_PROV_REFRELE(prov_desc);
363 return (CRYPTO_SUCCESS);
364
365 undo_then_bail:
366 undo_register_provider(prov_desc, B_TRUE);
367 ret = CRYPTO_FAILED;
368 bail:
369 KCF_PROV_REFRELE(prov_desc);
370
371 errormsg:
372 if (ret != CRYPTO_SUCCESS && sys_shutdown == 0) {
373 switch (ret) {
374 case CRYPTO_FAILED:
375 cmn_err(CE_WARN, "%s failed when registering with the "
376 "Cryptographic Framework.",
377 info->pi_provider_description);
378 break;
379
380 case CRYPTO_MODVERIFICATION_FAILED:
381 cmn_err(CE_WARN, "%s failed module verification when "
382 "registering with the Cryptographic Framework.",
383 info->pi_provider_description);
384 break;
385
386 case CRYPTO_ARGUMENTS_BAD:
387 cmn_err(CE_WARN, "%s provided bad arguments and was "
388 "not registered with the Cryptographic Framework.",
389 info->pi_provider_description);
390 break;
391
392 case CRYPTO_VERSION_MISMATCH:
393 cmn_err(CE_WARN, "%s was not registered with the "
394 "Cryptographic Framework as there is a SPI version "
395 "mismatch (%d) error.",
396 info->pi_provider_description,
397 info->pi_interface_version);
398 break;
399
400 case CRYPTO_FIPS140_ERROR:
401 cmn_err(CE_WARN, "%s was not registered with the "
402 "Cryptographic Framework as there was a FIPS 140 "
403 "validation error.", info->pi_provider_description);
404 break;
405
406 default:
407 cmn_err(CE_WARN, "%s did not register with the "
408 "Cryptographic Framework. (0x%x)",
409 info->pi_provider_description, ret);
410 		}
411 }
412
413 return (ret);
414 }
415
416 /* Return the number of holds on a provider. */
417 int
418 kcf_get_refcnt(kcf_provider_desc_t *pd, boolean_t do_lock)
419 {
420 int i;
421 int refcnt = 0;
422
423 if (do_lock)
424 for (i = 0; i < pd->pd_nbins; i++)
425 mutex_enter(&(pd->pd_percpu_bins[i].kp_lock));
426
427 for (i = 0; i < pd->pd_nbins; i++)
428 refcnt += pd->pd_percpu_bins[i].kp_holdcnt;
429
430 if (do_lock)
431 for (i = 0; i < pd->pd_nbins; i++)
432 mutex_exit(&(pd->pd_percpu_bins[i].kp_lock));
433
434 return (refcnt);
435 }
436
437 /*
438 * This routine is used to notify the framework when a provider is being
439 * removed. Hardware providers call this routine in their detach routines.
440 * Software providers call this routine in their _fini() routine.
441 */
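/*
 * Illustrative sketch (hypothetical names): a software provider would
 * normally unregister from its _fini() routine before allowing the module
 * to unload, e.g.:
 *
 *	int
 *	_fini(void)
 *	{
 *		if (crypto_unregister_provider(example_prov_handle) !=
 *		    CRYPTO_SUCCESS)
 *			return (EBUSY);
 *		return (mod_remove(&modlinkage));
 *	}
 */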
442 int
443 crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
444 {
445 uint_t mech_idx;
446 kcf_provider_desc_t *desc;
447 kcf_prov_state_t saved_state;
448 int ret = CRYPTO_SUCCESS;
449
450 /* lookup provider descriptor */
451 if ((desc = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) ==
452 NULL) {
453 ret = CRYPTO_UNKNOWN_PROVIDER;
454 goto errormsg;
455 }
456
457 mutex_enter(&desc->pd_lock);
458 /*
459 * Check if any other thread is disabling or removing
460 * this provider. We return if this is the case.
461 */
462 if (desc->pd_state >= KCF_PROV_DISABLED) {
463 mutex_exit(&desc->pd_lock);
464 /* Release reference held by kcf_prov_tab_lookup(). */
465 KCF_PROV_REFRELE(desc);
466 ret = CRYPTO_BUSY;
467 goto errormsg;
468 }
469
470 saved_state = desc->pd_state;
471 desc->pd_state = KCF_PROV_UNREGISTERING;
472
473 if (saved_state == KCF_PROV_BUSY) {
474 /*
475 * The per-provider taskq threads may be waiting. We
476 * signal them so that they can start failing requests.
477 */
478 cv_broadcast(&desc->pd_resume_cv);
479 }
480
481 mutex_exit(&desc->pd_lock);
482
483 if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
484 remove_provider(desc);
485 }
486
487 if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
488 /* remove the provider from the mechanisms tables */
489 for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
490 mech_idx++) {
491 kcf_remove_mech_provider(
492 desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
493 }
494 }
495
496 /* remove provider from providers table */
497 if (kcf_prov_tab_rem_provider((crypto_provider_id_t)handle) !=
498 CRYPTO_SUCCESS) {
499 /* Release reference held by kcf_prov_tab_lookup(). */
500 KCF_PROV_REFRELE(desc);
501 ret = CRYPTO_UNKNOWN_PROVIDER;
502 goto errormsg;
503 }
504
505 delete_kstat(desc);
506
507 if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
508 /*
509 * Wait till the existing requests with the provider complete
510 * and all the holds are released. All the holds on a software
511 * provider are from kernel clients and the hold time
512 * is expected to be short. So, we won't be stuck here forever.
513 */
514 while (kcf_get_refcnt(desc, B_TRUE) > 1) {
515 /* wait 1 second and try again. */
516 delay(1 * drv_usectohz(1000000));
517 }
518 } else {
519 int i;
520 kcf_prov_cpu_t *mp;
521
522 /*
523 * Wait until requests that have been sent to the provider
524 * complete.
525 */
526 for (i = 0; i < desc->pd_nbins; i++) {
527 mp = &(desc->pd_percpu_bins[i]);
528
529 mutex_enter(&mp->kp_lock);
530 while (mp->kp_jobcnt > 0) {
531 cv_wait(&mp->kp_cv, &mp->kp_lock);
532 }
533 mutex_exit(&mp->kp_lock);
534 }
535 }
536
537 mutex_enter(&desc->pd_lock);
538 desc->pd_state = KCF_PROV_UNREGISTERED;
539 mutex_exit(&desc->pd_lock);
540
541 kcf_do_notify(desc, B_FALSE);
542
543 mutex_enter(&prov_tab_mutex);
544 /* Release reference held by kcf_prov_tab_lookup(). */
545 KCF_PROV_REFRELE(desc);
546
547 if (kcf_get_refcnt(desc, B_TRUE) == 0) {
548 /* kcf_free_provider_desc drops prov_tab_mutex */
549 kcf_free_provider_desc(desc);
550 } else {
551 ASSERT(desc->pd_prov_type != CRYPTO_SW_PROVIDER);
552 /*
553 * We could avoid this if /dev/crypto can proactively
554 * remove any holds on us from a dormant PKCS #11 app.
555 * For now, we check the provider table for
556 * KCF_PROV_UNREGISTERED entries when a provider is
557 * added to the table or when a provider is removed from it
558 * and free them when refcnt reaches zero.
559 */
560 kcf_need_provtab_walk = B_TRUE;
561 mutex_exit(&prov_tab_mutex);
562 }
563
564 errormsg:
565 if (ret != CRYPTO_SUCCESS && sys_shutdown == 0) {
566 switch (ret) {
567 		case CRYPTO_UNKNOWN_PROVIDER:
568 			/* desc may be NULL here, so print the handle instead */
569 			cmn_err(CE_WARN, "Unknown provider (%x) was requested to "
570 			    "unregister from the cryptographic framework.", handle);
571 break;
572
573 case CRYPTO_BUSY:
574 cmn_err(CE_WARN, "%s could not be unregistered from "
575 "the Cryptographic Framework as it is busy.",
576 desc->pd_description);
577 break;
578
579 default:
580 cmn_err(CE_WARN, "%s did not unregister with the "
581 "Cryptographic Framework. (0x%x)",
582 desc->pd_description, ret);
583 		}
584 }
585
586 return (ret);
587 }
588
589 /*
590 * This routine is used to notify the framework that the state of
591 * a cryptographic provider has changed. Valid state codes are:
592 *
593 * CRYPTO_PROVIDER_READY
594  *	The provider indicates that it can process more requests. A provider
595  *	sends this notification if it has previously notified the framework
596  *	with a CRYPTO_PROVIDER_BUSY.
597 *
598 * CRYPTO_PROVIDER_BUSY
599  *	The provider cannot accept more requests.
600 *
601 * CRYPTO_PROVIDER_FAILED
602 * The provider encountered an internal error. The framework will not
603 * be sending any more requests to the provider. The provider may notify
604  *	with a CRYPTO_PROVIDER_READY if it is able to recover from the error.
605 *
606 * This routine can be called from user or interrupt context.
607 */
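/*
 * Illustrative sketch: a hardware provider that runs out of device resources
 * can throttle the framework and resume later (the example_sc softc and its
 * fields are hypothetical):
 *
 *	if (example_sc->sc_inflight == EXAMPLE_MAX_JOBS)
 *		crypto_provider_notification(example_sc->sc_prov_handle,
 *		    CRYPTO_PROVIDER_BUSY);
 *
 * and, once the completion interrupt has drained the queue:
 *
 *	crypto_provider_notification(example_sc->sc_prov_handle,
 *	    CRYPTO_PROVIDER_READY);
 */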
608 void
609 crypto_provider_notification(crypto_kcf_provider_handle_t handle, uint_t state)
610 {
611 kcf_provider_desc_t *pd;
612
613 /* lookup the provider from the given handle */
614 if ((pd = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
615 return;
616
617 mutex_enter(&pd->pd_lock);
618
619 if (pd->pd_state <= KCF_PROV_VERIFICATION_FAILED)
620 goto out;
621
622 if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
623 cmn_err(CE_WARN, "crypto_provider_notification: "
624 "logical provider (%x) ignored\n", handle);
625 goto out;
626 }
627 switch (state) {
628 case CRYPTO_PROVIDER_READY:
629 switch (pd->pd_state) {
630 case KCF_PROV_BUSY:
631 pd->pd_state = KCF_PROV_READY;
632 /*
633 * Signal the per-provider taskq threads that they
634 * can start submitting requests.
635 */
636 cv_broadcast(&pd->pd_resume_cv);
637 break;
638
639 case KCF_PROV_FAILED:
640 /*
641 * The provider recovered from the error. Let us
642 * use it now.
643 */
644 pd->pd_state = KCF_PROV_READY;
645 break;
646 }
647 break;
648
649 case CRYPTO_PROVIDER_BUSY:
650 switch (pd->pd_state) {
651 case KCF_PROV_READY:
652 pd->pd_state = KCF_PROV_BUSY;
653 break;
654 }
655 break;
656
657 case CRYPTO_PROVIDER_FAILED:
658 /*
659 * We note the failure and return. The per-provider taskq
660 * threads check this flag and start failing the
661 * requests, if it is set. See process_req_hwp() for details.
662 */
663 switch (pd->pd_state) {
664 case KCF_PROV_READY:
665 pd->pd_state = KCF_PROV_FAILED;
666 break;
667
668 case KCF_PROV_BUSY:
669 pd->pd_state = KCF_PROV_FAILED;
670 /*
671 * The per-provider taskq threads may be waiting. We
672 * signal them so that they can start failing requests.
673 */
674 cv_broadcast(&pd->pd_resume_cv);
675 break;
676 }
677 break;
678 }
679 out:
680 mutex_exit(&pd->pd_lock);
681 KCF_PROV_REFRELE(pd);
682 }
683
684 /*
685  * This routine is used to notify the framework of the result of
686 * an asynchronous request handled by a provider. Valid error
687 * codes are the same as the CRYPTO_* errors defined in common.h.
688 *
689 * This routine can be called from user or interrupt context.
690 */
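/*
 * Illustrative sketch: a hardware provider that returned CRYPTO_QUEUED from
 * an entry point completes the request later, typically from its interrupt
 * handler (the example job structure and its fields are hypothetical; the
 * crypto_req_handle_t is the one passed to the entry point):
 *
 *	crypto_op_notification(job->ej_kcf_req,
 *	    job->ej_device_error ? CRYPTO_DEVICE_ERROR : CRYPTO_SUCCESS);
 */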
691 void
692 crypto_op_notification(crypto_req_handle_t handle, int error)
693 {
694 kcf_call_type_t ctype;
695
696 if (handle == NULL)
697 return;
698
699 if ((ctype = GET_REQ_TYPE(handle)) == CRYPTO_SYNCH) {
700 kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)handle;
701
702 KCF_PROV_JOB_RELE_STAT(sreq->sn_mp, (error != CRYPTO_SUCCESS));
703 kcf_sop_done(sreq, error);
704 } else {
705 kcf_areq_node_t *areq = (kcf_areq_node_t *)handle;
706
707 ASSERT(ctype == CRYPTO_ASYNCH);
708 KCF_PROV_JOB_RELE_STAT(areq->an_mp, (error != CRYPTO_SUCCESS));
709 kcf_aop_done(areq, error);
710 }
711 }
712
713 /*
714 * This routine is used by software providers to determine
715 * whether to use KM_SLEEP or KM_NOSLEEP during memory allocation.
716 * Note that hardware providers can always use KM_SLEEP. So,
717 * they do not need to call this routine.
718 *
719 * This routine can be called from user or interrupt context.
720 */
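/*
 * Illustrative sketch (hypothetical provider context structure): a software
 * provider entry point passes the request handle to crypto_kmflag() when
 * allocating per-request state:
 *
 *	static int
 *	example_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
 *	    crypto_req_handle_t req)
 *	{
 *		example_ctx_t *ec;
 *
 *		ec = kmem_zalloc(sizeof (example_ctx_t), crypto_kmflag(req));
 *		if (ec == NULL)
 *			return (CRYPTO_HOST_MEMORY);
 *		ctx->cc_provider_private = ec;
 *		return (CRYPTO_SUCCESS);
 *	}
 */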
721 int
722 crypto_kmflag(crypto_req_handle_t handle)
723 {
724 return (REQHNDL2_KMFLAG(handle));
725 }
726
727 /*
728 * Process the mechanism info structures specified by the provider
729 * during registration. A NULL crypto_provider_info_t indicates
730 * an already initialized provider descriptor.
731 *
732 * Mechanisms are not added to the kernel's mechanism table if the
733 * provider is a logical provider.
734 *
735  * Returns CRYPTO_SUCCESS on success, CRYPTO_ARGUMENTS_BAD if one
736 * of the specified mechanisms was malformed, or CRYPTO_HOST_MEMORY
737 * if the table of mechanisms is full.
738 */
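/*
 * For reference, a crypto_mech_info_t entry in a provider's pi_mechanisms
 * table looks roughly like the hypothetical digest entry below; constant
 * names other than the CRYPTO_* flags and SUN_CKM_* mechanism names are
 * provider-private:
 *
 *	{ SUN_CKM_SHA256, EXAMPLE_SHA256_MECH_TYPE,
 *	    CRYPTO_FG_DIGEST | CRYPTO_FG_DIGEST_ATOMIC,
 *	    0, 0, CRYPTO_KEYSIZE_UNIT_IN_BITS }
 */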
739 static int
740 init_prov_mechs(crypto_provider_info_t *info, kcf_provider_desc_t *desc)
741 {
742 uint_t mech_idx;
743 uint_t cleanup_idx;
744 int err = CRYPTO_SUCCESS;
745 kcf_prov_mech_desc_t *pmd;
746 int desc_use_count = 0;
747 int mcount = desc->pd_mech_list_count;
748
749 if (desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
750 if (info != NULL) {
751 ASSERT(info->pi_mechanisms != NULL);
752 bcopy(info->pi_mechanisms, desc->pd_mechanisms,
753 sizeof (crypto_mech_info_t) * mcount);
754 }
755 return (CRYPTO_SUCCESS);
756 }
757
758 /*
759 * Copy the mechanism list from the provider info to the provider
760 * descriptor. desc->pd_mechanisms has an extra crypto_mech_info_t
761 * element if the provider has random_ops since we keep an internal
762 * mechanism, SUN_RANDOM, in this case.
763 */
764 if (info != NULL) {
765 if (info->pi_ops_vector->co_random_ops != NULL) {
766 crypto_mech_info_t *rand_mi;
767
768 /*
769 * Need the following check as it is possible to have
770 * a provider that implements just random_ops and has
771 * pi_mechanisms == NULL.
772 */
773 if (info->pi_mechanisms != NULL) {
774 bcopy(info->pi_mechanisms, desc->pd_mechanisms,
775 sizeof (crypto_mech_info_t) * (mcount - 1));
776 }
777 rand_mi = &desc->pd_mechanisms[mcount - 1];
778
779 bzero(rand_mi, sizeof (crypto_mech_info_t));
780 (void) strncpy(rand_mi->cm_mech_name, SUN_RANDOM,
781 CRYPTO_MAX_MECH_NAME);
782 rand_mi->cm_func_group_mask = CRYPTO_FG_RANDOM;
783 } else {
784 ASSERT(info->pi_mechanisms != NULL);
785 bcopy(info->pi_mechanisms, desc->pd_mechanisms,
786 sizeof (crypto_mech_info_t) * mcount);
787 }
788 }
789
790 /*
791 	 * For each mechanism supported by the provider, add the provider
792 * to the corresponding KCF mechanism mech_entry chain.
793 */
794 for (mech_idx = 0; mech_idx < desc->pd_mech_list_count; mech_idx++) {
795 crypto_mech_info_t *mi = &desc->pd_mechanisms[mech_idx];
796
797 if ((mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BITS) &&
798 (mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)) {
799 err = CRYPTO_ARGUMENTS_BAD;
800 break;
801 }
802
803 if ((err = kcf_add_mech_provider(mech_idx, desc, &pmd)) !=
804 KCF_SUCCESS)
805 break;
806
807 if (pmd == NULL)
808 continue;
809
810 /* The provider will be used for this mechanism */
811 desc_use_count++;
812 }
813
814 /*
815 * Don't allow multiple software providers with disabled mechanisms
816 * to register. Subsequent enabling of mechanisms will result in
817 * an unsupported configuration, i.e. multiple software providers
818 * per mechanism.
819 */
820 if (desc_use_count == 0 && desc->pd_prov_type == CRYPTO_SW_PROVIDER)
821 return (CRYPTO_ARGUMENTS_BAD);
822
823 if (err == KCF_SUCCESS)
824 return (CRYPTO_SUCCESS);
825
826 /*
827 	 * An error occurred while adding a mechanism; clean up
828 	 * and bail.
829 */
830 for (cleanup_idx = 0; cleanup_idx < mech_idx; cleanup_idx++) {
831 kcf_remove_mech_provider(
832 desc->pd_mechanisms[cleanup_idx].cm_mech_name, desc);
833 }
834
835 if (err == KCF_MECH_TAB_FULL)
836 return (CRYPTO_HOST_MEMORY);
837
838 return (CRYPTO_ARGUMENTS_BAD);
839 }
840
841 /*
842 * Update routine for kstat. Only privileged users are allowed to
843 * access this information, since this information is sensitive.
844 * There are some cryptographic attacks (e.g. traffic analysis)
845 * which can use this information.
846 */
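/*
 * For reference, the per-provider kstats updated by this routine can be
 * listed from userland with kstat(1M), e.g. "kstat -m kcf -c crypto";
 * a privileged user sees the real counts, other users see zeros.
 */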
847 static int
848 kcf_prov_kstat_update(kstat_t *ksp, int rw)
849 {
850 kcf_prov_stats_t *ks_data;
851 kcf_provider_desc_t *pd = (kcf_provider_desc_t *)ksp->ks_private;
852 int i;
853
854 if (rw == KSTAT_WRITE)
855 return (EACCES);
856
857 ks_data = ksp->ks_data;
858
859 if (secpolicy_sys_config(CRED(), B_TRUE) != 0) {
860 ks_data->ps_ops_total.value.ui64 = 0;
861 ks_data->ps_ops_passed.value.ui64 = 0;
862 ks_data->ps_ops_failed.value.ui64 = 0;
863 ks_data->ps_ops_busy_rval.value.ui64 = 0;
864 } else {
865 uint64_t dtotal, ftotal, btotal;
866
867 dtotal = ftotal = btotal = 0;
868 /* No locking done since an exact count is not required. */
869 for (i = 0; i < pd->pd_nbins; i++) {
870 dtotal += pd->pd_percpu_bins[i].kp_ndispatches;
871 ftotal += pd->pd_percpu_bins[i].kp_nfails;
872 btotal += pd->pd_percpu_bins[i].kp_nbusy_rval;
873 }
874
875 ks_data->ps_ops_total.value.ui64 = dtotal;
876 ks_data->ps_ops_failed.value.ui64 = ftotal;
877 ks_data->ps_ops_busy_rval.value.ui64 = btotal;
878 ks_data->ps_ops_passed.value.ui64 = dtotal - ftotal - btotal;
879 }
880
881 return (0);
882 }
883
884
885 /*
886 * Utility routine called from failure paths in crypto_register_provider()
887 * and from crypto_load_soft_disabled().
888 */
889 void
890 undo_register_provider(kcf_provider_desc_t *desc, boolean_t remove_prov)
891 {
892 uint_t mech_idx;
893
894 /* remove the provider from the mechanisms tables */
895 for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
896 mech_idx++) {
897 kcf_remove_mech_provider(
898 desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
899 }
900
901 /* remove provider from providers table */
902 if (remove_prov)
903 (void) kcf_prov_tab_rem_provider(desc->pd_prov_id);
904 }
905
906 /*
907 * Utility routine called from crypto_load_soft_disabled(). Callers
908 * should have done a prior undo_register_provider().
909 */
910 void
911 redo_register_provider(kcf_provider_desc_t *pd)
912 {
913 /* process the mechanisms supported by the provider */
914 (void) init_prov_mechs(NULL, pd);
915
916 /*
917 * Hold provider in providers table. We should not call
918 * kcf_prov_tab_add_provider() here as the provider descriptor
919 * is still valid which means it has an entry in the provider
920 * table.
921 */
922 KCF_PROV_REFHOLD(pd);
923 }
924
925 /*
926 * Add provider (p1) to another provider's array of providers (p2).
927 * Hardware and logical providers use this array to cross-reference
928 * each other.
929 */
930 static void
931 add_provider_to_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
932 {
933 kcf_provider_list_t *new;
934
935 new = kmem_alloc(sizeof (kcf_provider_list_t), KM_SLEEP);
936 mutex_enter(&p2->pd_lock);
937 new->pl_next = p2->pd_provider_list;
938 p2->pd_provider_list = new;
939 new->pl_provider = p1;
940 mutex_exit(&p2->pd_lock);
941 }
942
943 /*
944 * Remove provider (p1) from another provider's array of providers (p2).
945 * Hardware and logical providers use this array to cross-reference
946 * each other.
947 */
948 static void
949 remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
950 {
951
952 kcf_provider_list_t *pl = NULL, **prev;
953
954 mutex_enter(&p2->pd_lock);
955 for (pl = p2->pd_provider_list, prev = &p2->pd_provider_list;
956 pl != NULL; prev = &pl->pl_next, pl = pl->pl_next) {
957 if (pl->pl_provider == p1) {
958 break;
959 }
960 }
961
962 	if (pl == NULL) {	/* p1 not found in p2's provider list */
963 mutex_exit(&p2->pd_lock);
964 return;
965 }
966
967 /* detach and free kcf_provider_list structure */
968 *prev = pl->pl_next;
969 kmem_free(pl, sizeof (*pl));
970 mutex_exit(&p2->pd_lock);
971 }
972
973 /*
974 * Convert an array of logical provider handles (crypto_provider_id)
975 * stored in a crypto_provider_info structure into an array of provider
976 * descriptors (kcf_provider_desc_t) attached to a logical provider.
977 */
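/*
 * Illustrative sketch (hypothetical names): a hardware provider declares
 * membership in logical providers through its registration info, e.g.:
 *
 *	example_prov_info.pi_logical_provider_count = 1;
 *	example_prov_info.pi_logical_providers = example_logical_handles;
 *
 * where example_logical_handles[] holds the KCF handles of the already
 * registered logical providers.
 */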
978 static void
979 process_logical_providers(crypto_provider_info_t *info, kcf_provider_desc_t *hp)
980 {
981 kcf_provider_desc_t *lp;
982 crypto_provider_id_t handle;
983 int count = info->pi_logical_provider_count;
984 int i;
985
986 /* add hardware provider to each logical provider */
987 for (i = 0; i < count; i++) {
988 handle = info->pi_logical_providers[i];
989 lp = kcf_prov_tab_lookup((crypto_provider_id_t)handle);
990 if (lp == NULL) {
991 continue;
992 }
993 add_provider_to_array(hp, lp);
994 hp->pd_flags |= KCF_LPROV_MEMBER;
995
996 /*
997 * A hardware provider has to have the provider descriptor of
998 * every logical provider it belongs to, so it can be removed
999 * from the logical provider if the hardware provider
1000 * unregisters from the framework.
1001 */
1002 add_provider_to_array(lp, hp);
1003 KCF_PROV_REFRELE(lp);
1004 }
1005 }
1006
1007 /*
1008 * This routine removes a provider from all of the logical or
1009 * hardware providers it belongs to, and frees the provider's
1010 * array of pointers to providers.
1011 */
1012 static void
1013 remove_provider(kcf_provider_desc_t *pp)
1014 {
1015 kcf_provider_desc_t *p;
1016 kcf_provider_list_t *e, *next;
1017
1018 mutex_enter(&pp->pd_lock);
1019 for (e = pp->pd_provider_list; e != NULL; e = next) {
1020 p = e->pl_provider;
1021 remove_provider_from_array(pp, p);
1022 if (p->pd_prov_type == CRYPTO_HW_PROVIDER &&
1023 p->pd_provider_list == NULL)
1024 p->pd_flags &= ~KCF_LPROV_MEMBER;
1025 next = e->pl_next;
1026 kmem_free(e, sizeof (*e));
1027 }
1028 pp->pd_provider_list = NULL;
1029 mutex_exit(&pp->pd_lock);
1030 }
1031
1032 /*
1033 * Dispatch events as needed for a provider. is_added flag tells
1034 * whether the provider is registering or unregistering.
1035 */
1036 void
1037 kcf_do_notify(kcf_provider_desc_t *prov_desc, boolean_t is_added)
1038 {
1039 int i;
1040 crypto_notify_event_change_t ec;
1041
1042 ASSERT(prov_desc->pd_state > KCF_PROV_VERIFICATION_FAILED);
1043
1044 /*
1045 * Inform interested clients of the mechanisms becoming
1046 * available/unavailable. We skip this for logical providers
1047 * as they do not affect mechanisms.
1048 */
1049 if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
1050 ec.ec_provider_type = prov_desc->pd_prov_type;
1051 ec.ec_change = is_added ? CRYPTO_MECH_ADDED :
1052 CRYPTO_MECH_REMOVED;
1053 for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
1054 /* Skip any mechanisms not allowed by the policy */
1055 if (is_mech_disabled(prov_desc,
1056 prov_desc->pd_mechanisms[i].cm_mech_name))
1057 continue;
1058
1059 (void) strncpy(ec.ec_mech_name,
1060 prov_desc->pd_mechanisms[i].cm_mech_name,
1061 CRYPTO_MAX_MECH_NAME);
1062 kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED, &ec);
1063 }
1064
1065 }
1066
1067 /*
1068 * Inform interested clients about the new or departing provider.
1069 * In case of a logical provider, we need to notify the event only
1070 * for the logical provider and not for the underlying
1071 * providers which are known by the KCF_LPROV_MEMBER bit.
1072 */
1073 if (prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER ||
1074 (prov_desc->pd_flags & KCF_LPROV_MEMBER) == 0) {
1075 kcf_walk_ntfylist(is_added ? CRYPTO_EVENT_PROVIDER_REGISTERED :
1076 CRYPTO_EVENT_PROVIDER_UNREGISTERED, prov_desc);
1077 }
1078 }
1079
1080 static void
1081 delete_kstat(kcf_provider_desc_t *desc)
1082 {
1083 /* destroy the kstat created for this provider */
1084 if (desc->pd_kstat != NULL) {
1085 kcf_provider_desc_t *kspd = desc->pd_kstat->ks_private;
1086
1087 /* release reference held by desc->pd_kstat->ks_private */
1088 ASSERT(desc == kspd);
1089 kstat_delete(kspd->pd_kstat);
1090 desc->pd_kstat = NULL;
1091 KCF_PROV_REFRELE(kspd);
1092 }
1093 }
1094