/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2010 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * This file is part of the core Kernel Cryptographic Framework.
 * It implements the SPI functions exported to cryptographic
 * providers.
 */

#include <sys/ksynch.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/ioctladmin.h>
#include <sys/taskq.h>
#include <sys/disp.h>
#include <sys/kstat.h>
#include <sys/policy.h>
#include <sys/cpuvar.h>

/*
 * minalloc and maxalloc values to be used for taskq_create().
 */
int crypto_taskq_threads = CRYPTO_TASKQ_THREADS;
int crypto_taskq_minalloc = CRYPTO_TASKQ_MIN;
int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX;

static void remove_provider(kcf_provider_desc_t *);
static void process_logical_providers(crypto_provider_info_t *,
    kcf_provider_desc_t *);
static int init_prov_mechs(crypto_provider_info_t *, kcf_provider_desc_t *);
static int kcf_prov_kstat_update(kstat_t *, int);
static void delete_kstat(kcf_provider_desc_t *);

static kcf_prov_stats_t kcf_stats_ks_data_template = {
	{ "kcf_ops_total", KSTAT_DATA_UINT64 },
	{ "kcf_ops_passed", KSTAT_DATA_UINT64 },
	{ "kcf_ops_failed", KSTAT_DATA_UINT64 },
	{ "kcf_ops_returned_busy", KSTAT_DATA_UINT64 }
};

#define	KCF_SPI_COPY_OPS(src, dst, ops) if ((src)->ops != NULL) \
	*((dst)->ops) = *((src)->ops);

extern int sys_shutdown;

/*
 * Copy an ops vector from src to dst. Used during provider registration
 * to copy the ops vector from the provider info structure to the
 * provider descriptor maintained by KCF.
 * Copying the ops vector specified by the provider is needed since the
 * framework does not require the provider info structure to be
 * persistent.
 */
static void
copy_ops_vector_v1(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_control_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_cipher_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mac_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_sign_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_verify_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_cipher_mac_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_random_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_session_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_object_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_key_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_provider_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_ctx_ops);
}

static void
copy_ops_vector_v2(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops);
}

static void
copy_ops_vector_v3(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_nostore_key_ops);
}

static void
copy_ops_vector_v4(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_fips140_ops);
}
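
/*
 * Illustrative sketch only (not part of this file; the "xx_" names are
 * hypothetical): a provider typically defines its entry-point sub-vectors
 * statically and aggregates the ones it implements in the crypto_ops_t it
 * passes in pi_ops_vector when registering. Sub-vectors left NULL are
 * simply not copied by the copy_ops_vector_v*() routines above.
 *
 *	static crypto_control_ops_t xx_control_ops = {
 *		xx_provider_status
 *	};
 *
 *	static crypto_digest_ops_t xx_digest_ops = {
 *		xx_digest_init, xx_digest, xx_digest_update,
 *		xx_digest_key, xx_digest_final, xx_digest_atomic
 *	};
 *
 *	static crypto_ops_t xx_crypto_ops = {
 *		&xx_control_ops,
 *		&xx_digest_ops
 *	};
 *
 * Sub-vector pointers after co_digest_ops are left NULL by the static
 * initialization in this sketch.
 */
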
121
122 /*
123 * This routine is used to add cryptographic providers to the KEF framework.
124 * Providers pass a crypto_provider_info structure to crypto_register_provider()
125 * and get back a handle. The crypto_provider_info structure contains a
126 * list of mechanisms supported by the provider and an ops vector containing
127 * provider entry points. Hardware providers call this routine in their attach
128 * routines. Software providers call this routine in their _init() routine.
129 */
130 int
crypto_register_provider(crypto_provider_info_t * info,crypto_kcf_provider_handle_t * handle)131 crypto_register_provider(crypto_provider_info_t *info,
132 crypto_kcf_provider_handle_t *handle)
133 {
134 struct modctl *mcp;
135 char *name;
136 char ks_name[KSTAT_STRLEN];
137 kcf_provider_desc_t *prov_desc = NULL;
138 int ret = CRYPTO_ARGUMENTS_BAD;
139
140 if (info->pi_interface_version > CRYPTO_SPI_VERSION_4) {
141 ret = CRYPTO_VERSION_MISMATCH;
142 goto errormsg;
143 }
144
145 /*
146 * Check provider type, must be software, hardware, or logical.
147 */
148 if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
149 info->pi_provider_type != CRYPTO_SW_PROVIDER &&
150 info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
151 goto errormsg;
152
153 /*
154 * Allocate and initialize a new provider descriptor. We also
155 * hold it and release it when done.
156 */
157 prov_desc = kcf_alloc_provider_desc(info);
158 KCF_PROV_REFHOLD(prov_desc);
159
160 prov_desc->pd_prov_type = info->pi_provider_type;
161
162 /* provider-private handle, opaque to KCF */
163 prov_desc->pd_prov_handle = info->pi_provider_handle;
164
165 /* copy provider description string */
166 if (info->pi_provider_description != NULL) {
167 /*
		 * pi_provider_description is a string that can contain
		 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
		 * INCLUDING the terminating null character. A bcopy()
		 * is necessary here as pd_description should not have
		 * a null character. See comments in kcf_alloc_provider_desc()
		 * for details on pd_description field.
		 */
		bcopy(info->pi_provider_description, prov_desc->pd_description,
		    min(strlen(info->pi_provider_description),
		    CRYPTO_PROVIDER_DESCR_MAX_LEN));
	}

	if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
		if (info->pi_ops_vector == NULL) {
			goto bail;
		}
		copy_ops_vector_v1(info->pi_ops_vector,
		    prov_desc->pd_ops_vector);
		if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
			copy_ops_vector_v2(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
			prov_desc->pd_flags = info->pi_flags;
		}
		if (info->pi_interface_version >= CRYPTO_SPI_VERSION_3) {
			copy_ops_vector_v3(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
		}
		if (info->pi_interface_version == CRYPTO_SPI_VERSION_4) {
			copy_ops_vector_v4(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
		}
	}

	/* object_ops and nostore_key_ops are mutually exclusive */
	if (prov_desc->pd_ops_vector->co_object_ops &&
	    prov_desc->pd_ops_vector->co_nostore_key_ops) {
		goto bail;
	}
	/*
	 * For software providers, copy the module name and module ID.
	 * For hardware providers, copy the driver name and instance.
	 */
	switch (info->pi_provider_type) {
	case CRYPTO_SW_PROVIDER:
		if (info->pi_provider_dev.pd_sw == NULL)
			goto bail;

		if ((mcp = mod_getctl(info->pi_provider_dev.pd_sw)) == NULL)
			goto bail;

		prov_desc->pd_module_id = mcp->mod_id;
		name = mcp->mod_modname;
		break;

	case CRYPTO_HW_PROVIDER:
	case CRYPTO_LOGICAL_PROVIDER:
		if (info->pi_provider_dev.pd_hw == NULL)
			goto bail;

		prov_desc->pd_instance =
		    ddi_get_instance(info->pi_provider_dev.pd_hw);
		name = (char *)ddi_driver_name(info->pi_provider_dev.pd_hw);
		break;
	}
	if (name == NULL)
		goto bail;

	prov_desc->pd_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
	(void) strcpy(prov_desc->pd_name, name);

	if ((prov_desc->pd_mctlp = kcf_get_modctl(info)) == NULL)
		goto bail;

	/* process the mechanisms supported by the provider */
	if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
		goto bail;

	/*
	 * Add provider to providers tables, also sets the descriptor
	 * pd_prov_id field.
	 */
	if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
		undo_register_provider(prov_desc, B_FALSE);
		goto bail;
	}

	/*
	 * We create a taskq only for a hardware provider. The global
	 * software queue is used for software providers. We handle ordering
	 * of multi-part requests in the taskq routine. So, it is safe to
	 * have multiple threads for the taskq. We pass TASKQ_PREPOPULATE flag
	 * to keep some entries cached to improve performance.
	 */
	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
		prov_desc->pd_taskq = taskq_create("kcf_taskq",
		    crypto_taskq_threads, minclsyspri,
		    crypto_taskq_minalloc, crypto_taskq_maxalloc,
		    TASKQ_PREPOPULATE);
	else
		prov_desc->pd_taskq = NULL;

	/* no kernel session to logical providers and no pd_flags */
	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Open a session for session-oriented providers. This session
		 * is used for all kernel consumers. This is fine as a provider
		 * is required to support multiple thread access to a session.
		 * We can do this only after the taskq has been created as we
		 * do a kcf_submit_request() to open the session.
		 */
		if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
			kcf_req_params_t params;

			KCF_WRAP_SESSION_OPS_PARAMS(&params,
			    KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
			    CRYPTO_USER, NULL, 0, prov_desc);
			ret = kcf_submit_request(prov_desc, NULL, NULL, &params,
			    B_FALSE);
			if (ret != CRYPTO_SUCCESS)
				goto undo_then_bail;
		}

		/*
		 * Get the value for the maximum input length allowed if
		 * CRYPTO_HASH_NO_UPDATE or CRYPTO_HMAC_NO_UPDATE is specified.
		 */
		if (prov_desc->pd_flags &
		    (CRYPTO_HASH_NO_UPDATE | CRYPTO_HMAC_NO_UPDATE)) {
			kcf_req_params_t params;
			crypto_provider_ext_info_t ext_info;

			if (KCF_PROV_PROVMGMT_OPS(prov_desc) == NULL)
				goto undo_then_bail;

			bzero(&ext_info, sizeof (ext_info));
			KCF_WRAP_PROVMGMT_OPS_PARAMS(&params,
			    KCF_OP_MGMT_EXTINFO,
			    0, NULL, 0, NULL, 0, NULL, &ext_info, prov_desc);
			ret = kcf_submit_request(prov_desc, NULL, NULL,
			    &params, B_FALSE);
			if (ret != CRYPTO_SUCCESS)
				goto undo_then_bail;

			if (prov_desc->pd_flags & CRYPTO_HASH_NO_UPDATE) {
				prov_desc->pd_hash_limit =
				    ext_info.ei_hash_max_input_len;
			}
			if (prov_desc->pd_flags & CRYPTO_HMAC_NO_UPDATE) {
				prov_desc->pd_hmac_limit =
				    ext_info.ei_hmac_max_input_len;
			}
		}
	}

	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Create the kstat for this provider. There is a kstat
		 * installed for each successfully registered provider.
		 * This kstat is deleted when the provider unregisters.
		 */
		if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%s",
			    prov_desc->pd_name, "provider_stats");
		} else {
			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%d_%u_%s",
			    prov_desc->pd_name, prov_desc->pd_instance,
			    prov_desc->pd_prov_id, "provider_stats");
		}

		prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
		    KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
		    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

		if (prov_desc->pd_kstat != NULL) {
			bcopy(&kcf_stats_ks_data_template,
			    &prov_desc->pd_ks_data,
			    sizeof (kcf_stats_ks_data_template));
			prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
			KCF_PROV_REFHOLD(prov_desc);
			prov_desc->pd_kstat->ks_private = prov_desc;
			prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
			kstat_install(prov_desc->pd_kstat);
		}
	}

	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
		process_logical_providers(info, prov_desc);

	mutex_enter(&prov_desc->pd_lock);
	prov_desc->pd_state = KCF_PROV_READY;
	mutex_exit(&prov_desc->pd_lock);
	kcf_do_notify(prov_desc, B_TRUE);

exit:
	*handle = prov_desc->pd_kcf_prov_handle;
	KCF_PROV_REFRELE(prov_desc);
	return (CRYPTO_SUCCESS);

undo_then_bail:
	undo_register_provider(prov_desc, B_TRUE);
	ret = CRYPTO_FAILED;
bail:
	KCF_PROV_REFRELE(prov_desc);

errormsg:
	if (ret != CRYPTO_SUCCESS && sys_shutdown == 0) {
		switch (ret) {
		case CRYPTO_FAILED:
			cmn_err(CE_WARN, "%s failed when registering with the "
			    "Cryptographic Framework.",
			    info->pi_provider_description);
			break;

		case CRYPTO_MODVERIFICATION_FAILED:
			cmn_err(CE_WARN, "%s failed module verification when "
			    "registering with the Cryptographic Framework.",
			    info->pi_provider_description);
			break;

		case CRYPTO_ARGUMENTS_BAD:
			cmn_err(CE_WARN, "%s provided bad arguments and was "
			    "not registered with the Cryptographic Framework.",
			    info->pi_provider_description);
			break;

		case CRYPTO_VERSION_MISMATCH:
			cmn_err(CE_WARN, "%s was not registered with the "
			    "Cryptographic Framework as there is a SPI version "
			    "mismatch (%d) error.",
			    info->pi_provider_description,
			    info->pi_interface_version);
			break;

		case CRYPTO_FIPS140_ERROR:
			cmn_err(CE_WARN, "%s was not registered with the "
			    "Cryptographic Framework as there was a FIPS 140 "
			    "validation error.", info->pi_provider_description);
			break;

		default:
			cmn_err(CE_WARN, "%s did not register with the "
			    "Cryptographic Framework. (0x%x)",
			    info->pi_provider_description, ret);
		}
	}

	return (ret);
}
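
/*
 * Illustrative sketch only (not part of this file; the "mymod" names are
 * hypothetical): a software provider typically registers from its _init()
 * routine once its module linkage has been installed.
 *
 *	static crypto_kcf_provider_handle_t mymod_prov_handle;
 *
 *	int
 *	_init(void)
 *	{
 *		int ret;
 *
 *		if ((ret = mod_install(&modlinkage)) != 0)
 *			return (ret);
 *
 *		if (crypto_register_provider(&mymod_prov_info,
 *		    &mymod_prov_handle) != CRYPTO_SUCCESS)
 *			cmn_err(CE_WARN,
 *			    "mymod _init: crypto_register_provider() failed");
 *
 *		return (0);
 *	}
 *
 * where mymod_prov_info is a crypto_provider_info_t carrying the provider
 * type, description, ops vector and mechanism list validated above.
 */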

/* Return the number of holds on a provider. */
int
kcf_get_refcnt(kcf_provider_desc_t *pd, boolean_t do_lock)
{
	int i;
	int refcnt = 0;

	if (do_lock)
		for (i = 0; i < pd->pd_nbins; i++)
			mutex_enter(&(pd->pd_percpu_bins[i].kp_lock));

	for (i = 0; i < pd->pd_nbins; i++)
		refcnt += pd->pd_percpu_bins[i].kp_holdcnt;

	if (do_lock)
		for (i = 0; i < pd->pd_nbins; i++)
			mutex_exit(&(pd->pd_percpu_bins[i].kp_lock));

	return (refcnt);
}

/*
 * This routine is used to notify the framework when a provider is being
 * removed. Hardware providers call this routine in their detach routines.
 * Software providers call this routine in their _fini() routine.
 */
int
crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
{
	uint_t mech_idx;
	kcf_provider_desc_t *desc;
	kcf_prov_state_t saved_state;
	int ret = CRYPTO_SUCCESS;

	/* lookup provider descriptor */
	if ((desc = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) ==
	    NULL) {
		ret = CRYPTO_UNKNOWN_PROVIDER;
		goto errormsg;
	}

	mutex_enter(&desc->pd_lock);
	/*
	 * Check if any other thread is disabling or removing
	 * this provider. We return if this is the case.
	 */
	if (desc->pd_state >= KCF_PROV_DISABLED) {
		mutex_exit(&desc->pd_lock);
		/* Release reference held by kcf_prov_tab_lookup(). */
		KCF_PROV_REFRELE(desc);
		ret = CRYPTO_BUSY;
		goto errormsg;
	}

	saved_state = desc->pd_state;
	desc->pd_state = KCF_PROV_UNREGISTERING;

	if (saved_state == KCF_PROV_BUSY) {
		/*
		 * The per-provider taskq threads may be waiting. We
		 * signal them so that they can start failing requests.
		 */
		cv_broadcast(&desc->pd_resume_cv);
	}

	mutex_exit(&desc->pd_lock);

	if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
		remove_provider(desc);
	}

	if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/* remove the provider from the mechanisms tables */
		for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
		    mech_idx++) {
			kcf_remove_mech_provider(
			    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
		}
	}

	/* remove provider from providers table */
	if (kcf_prov_tab_rem_provider((crypto_provider_id_t)handle) !=
	    CRYPTO_SUCCESS) {
		/* Release reference held by kcf_prov_tab_lookup(). */
		KCF_PROV_REFRELE(desc);
		ret = CRYPTO_UNKNOWN_PROVIDER;
		goto errormsg;
	}

	delete_kstat(desc);

	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
		/*
		 * Wait till the existing requests with the provider complete
		 * and all the holds are released. All the holds on a software
		 * provider are from kernel clients and the hold time
		 * is expected to be short. So, we won't be stuck here forever.
		 */
		while (kcf_get_refcnt(desc, B_TRUE) > 1) {
			/* wait 1 second and try again. */
			delay(1 * drv_usectohz(1000000));
		}
	} else {
		int i;
		kcf_prov_cpu_t *mp;

		/*
		 * Wait until requests that have been sent to the provider
		 * complete.
		 */
		for (i = 0; i < desc->pd_nbins; i++) {
			mp = &(desc->pd_percpu_bins[i]);

			mutex_enter(&mp->kp_lock);
			while (mp->kp_jobcnt > 0) {
				cv_wait(&mp->kp_cv, &mp->kp_lock);
			}
			mutex_exit(&mp->kp_lock);
		}
	}

	mutex_enter(&desc->pd_lock);
	desc->pd_state = KCF_PROV_UNREGISTERED;
	mutex_exit(&desc->pd_lock);

	kcf_do_notify(desc, B_FALSE);

	mutex_enter(&prov_tab_mutex);
	/* Release reference held by kcf_prov_tab_lookup(). */
	KCF_PROV_REFRELE(desc);

	if (kcf_get_refcnt(desc, B_TRUE) == 0) {
		/* kcf_free_provider_desc drops prov_tab_mutex */
		kcf_free_provider_desc(desc);
	} else {
		ASSERT(desc->pd_prov_type != CRYPTO_SW_PROVIDER);
		/*
		 * We could avoid this if /dev/crypto can proactively
		 * remove any holds on us from a dormant PKCS #11 app.
		 * For now, we check the provider table for
		 * KCF_PROV_UNREGISTERED entries when a provider is
		 * added to the table or when a provider is removed from it
		 * and free them when refcnt reaches zero.
		 */
		kcf_need_provtab_walk = B_TRUE;
		mutex_exit(&prov_tab_mutex);
	}

errormsg:
	if (ret != CRYPTO_SUCCESS && sys_shutdown == 0) {
		switch (ret) {
		case CRYPTO_UNKNOWN_PROVIDER:
			cmn_err(CE_WARN, "Unknown provider \"%s\" was "
			    "requested to unregister from the cryptographic "
			    "framework.", desc->pd_description);
			break;

		case CRYPTO_BUSY:
			cmn_err(CE_WARN, "%s could not be unregistered from "
			    "the Cryptographic Framework as it is busy.",
			    desc->pd_description);
			break;

		default:
			cmn_err(CE_WARN, "%s did not unregister with the "
			    "Cryptographic Framework. (0x%x)",
			    desc->pd_description, ret);
		}
	}

	return (ret);
}
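
/*
 * Illustrative sketch only (not part of this file; the "mymod" names are
 * hypothetical): a software provider typically unregisters from its _fini()
 * routine and refuses to unload while the framework still has requests or
 * holds outstanding.
 *
 *	int
 *	_fini(void)
 *	{
 *		if (crypto_unregister_provider(mymod_prov_handle) !=
 *		    CRYPTO_SUCCESS)
 *			return (EBUSY);
 *
 *		return (mod_remove(&modlinkage));
 *	}
 */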

/*
 * This routine is used to notify the framework that the state of
 * a cryptographic provider has changed. Valid state codes are:
 *
 * CRYPTO_PROVIDER_READY
 * The provider indicates that it can process more requests. A provider
 * will send this notification if it has previously notified us with a
 * CRYPTO_PROVIDER_BUSY.
 *
 * CRYPTO_PROVIDER_BUSY
 * The provider cannot take more requests.
 *
 * CRYPTO_PROVIDER_FAILED
 * The provider encountered an internal error. The framework will not
 * be sending any more requests to the provider. The provider may notify
 * with a CRYPTO_PROVIDER_READY if it is able to recover from the error.
 *
 * This routine can be called from user or interrupt context.
 */
void
crypto_provider_notification(crypto_kcf_provider_handle_t handle, uint_t state)
{
	kcf_provider_desc_t *pd;

	/* lookup the provider from the given handle */
	if ((pd = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
		return;

	mutex_enter(&pd->pd_lock);

	if (pd->pd_state <= KCF_PROV_VERIFICATION_FAILED)
		goto out;

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		cmn_err(CE_WARN, "crypto_provider_notification: "
		    "logical provider (%x) ignored\n", handle);
		goto out;
	}
	switch (state) {
	case CRYPTO_PROVIDER_READY:
		switch (pd->pd_state) {
		case KCF_PROV_BUSY:
			pd->pd_state = KCF_PROV_READY;
			/*
			 * Signal the per-provider taskq threads that they
			 * can start submitting requests.
			 */
			cv_broadcast(&pd->pd_resume_cv);
			break;

		case KCF_PROV_FAILED:
			/*
			 * The provider recovered from the error. Let us
			 * use it now.
			 */
			pd->pd_state = KCF_PROV_READY;
			break;
		}
		break;

	case CRYPTO_PROVIDER_BUSY:
		switch (pd->pd_state) {
		case KCF_PROV_READY:
			pd->pd_state = KCF_PROV_BUSY;
			break;
		}
		break;

	case CRYPTO_PROVIDER_FAILED:
		/*
		 * We note the failure and return. The per-provider taskq
		 * threads check this flag and start failing the
		 * requests, if it is set. See process_req_hwp() for details.
		 */
		switch (pd->pd_state) {
		case KCF_PROV_READY:
			pd->pd_state = KCF_PROV_FAILED;
			break;

		case KCF_PROV_BUSY:
			pd->pd_state = KCF_PROV_FAILED;
			/*
			 * The per-provider taskq threads may be waiting. We
			 * signal them so that they can start failing requests.
			 */
			cv_broadcast(&pd->pd_resume_cv);
			break;
		}
		break;
	}
out:
	mutex_exit(&pd->pd_lock);
	KCF_PROV_REFRELE(pd);
}
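
/*
 * Illustrative sketch only (not part of this file; the "xx" names are
 * hypothetical): a hardware provider whose request queue fills up can
 * throttle the framework with these notifications, for example from its
 * job-submission path:
 *
 *	if (xx_queue_full(xxp))
 *		crypto_provider_notification(xxp->xx_prov_handle,
 *		    CRYPTO_PROVIDER_BUSY);
 *
 * and later, once enough queue space is available again:
 *
 *	crypto_provider_notification(xxp->xx_prov_handle,
 *	    CRYPTO_PROVIDER_READY);
 */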

/*
 * This routine is used to notify the framework of the result of
 * an asynchronous request handled by a provider. Valid error
 * codes are the same as the CRYPTO_* errors defined in common.h.
 *
 * This routine can be called from user or interrupt context.
 */
void
crypto_op_notification(crypto_req_handle_t handle, int error)
{
	kcf_call_type_t ctype;

	if (handle == NULL)
		return;

	if ((ctype = GET_REQ_TYPE(handle)) == CRYPTO_SYNCH) {
		kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)handle;

		KCF_PROV_JOB_RELE_STAT(sreq->sn_mp, (error != CRYPTO_SUCCESS));
		kcf_sop_done(sreq, error);
	} else {
		kcf_areq_node_t *areq = (kcf_areq_node_t *)handle;

		ASSERT(ctype == CRYPTO_ASYNCH);
		KCF_PROV_JOB_RELE_STAT(areq->an_mp, (error != CRYPTO_SUCCESS));
		kcf_aop_done(areq, error);
	}
}
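
/*
 * Illustrative sketch only (not part of this file; the "xx" names are
 * hypothetical): a hardware provider that returned CRYPTO_QUEUED for a
 * request typically completes it later from its interrupt handler.
 *
 *	static void
 *	xx_done(xx_request_t *reqp, int err)
 *	{
 *		crypto_req_handle_t kcfreq = reqp->xr_kcf_req;
 *
 *		xx_free_request(reqp);
 *		crypto_op_notification(kcfreq, err);
 *	}
 *
 * where err is one of the CRYPTO_* status codes from common.h.
 */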

/*
 * This routine is used by software providers to determine
 * whether to use KM_SLEEP or KM_NOSLEEP during memory allocation.
 * Note that hardware providers can always use KM_SLEEP. So,
 * they do not need to call this routine.
 *
 * This routine can be called from user or interrupt context.
 */
int
crypto_kmflag(crypto_req_handle_t handle)
{
	return (REQHNDL2_KMFLAG(handle));
}
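
/*
 * Illustrative sketch only (not part of this file; the "xx_" names are
 * hypothetical): a software provider passes the request handle it was
 * given to crypto_kmflag() so that allocations made on behalf of a
 * no-sleep caller do not block.
 *
 *	static int
 *	xx_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mech,
 *	    crypto_key_t *key, crypto_spi_ctx_template_t tmpl,
 *	    crypto_req_handle_t req)
 *	{
 *		xx_ctx_t *xp;
 *
 *		xp = kmem_zalloc(sizeof (xx_ctx_t), crypto_kmflag(req));
 *		if (xp == NULL)
 *			return (CRYPTO_HOST_MEMORY);
 *
 *		ctx->cc_provider_private = xp;
 *		return (CRYPTO_SUCCESS);
 *	}
 */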

/*
 * Process the mechanism info structures specified by the provider
 * during registration. A NULL crypto_provider_info_t indicates
 * an already initialized provider descriptor.
 *
 * Mechanisms are not added to the kernel's mechanism table if the
 * provider is a logical provider.
 *
 * Returns CRYPTO_SUCCESS on success, CRYPTO_ARGUMENTS_BAD if one
 * of the specified mechanisms was malformed, or CRYPTO_HOST_MEMORY
 * if the table of mechanisms is full.
 */
static int
init_prov_mechs(crypto_provider_info_t *info, kcf_provider_desc_t *desc)
{
	uint_t mech_idx;
	uint_t cleanup_idx;
	int err = CRYPTO_SUCCESS;
	kcf_prov_mech_desc_t *pmd;
	int desc_use_count = 0;
	int mcount = desc->pd_mech_list_count;

	if (desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		if (info != NULL) {
			ASSERT(info->pi_mechanisms != NULL);
			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
			    sizeof (crypto_mech_info_t) * mcount);
		}
		return (CRYPTO_SUCCESS);
	}

	/*
	 * Copy the mechanism list from the provider info to the provider
	 * descriptor. desc->pd_mechanisms has an extra crypto_mech_info_t
	 * element if the provider has random_ops since we keep an internal
	 * mechanism, SUN_RANDOM, in this case.
	 */
	if (info != NULL) {
		if (info->pi_ops_vector->co_random_ops != NULL) {
			crypto_mech_info_t *rand_mi;

			/*
			 * Need the following check as it is possible to have
			 * a provider that implements just random_ops and has
			 * pi_mechanisms == NULL.
			 */
			if (info->pi_mechanisms != NULL) {
				bcopy(info->pi_mechanisms, desc->pd_mechanisms,
				    sizeof (crypto_mech_info_t) * (mcount - 1));
			}
			rand_mi = &desc->pd_mechanisms[mcount - 1];

			bzero(rand_mi, sizeof (crypto_mech_info_t));
			(void) strncpy(rand_mi->cm_mech_name, SUN_RANDOM,
			    CRYPTO_MAX_MECH_NAME);
			rand_mi->cm_func_group_mask = CRYPTO_FG_RANDOM;
		} else {
			ASSERT(info->pi_mechanisms != NULL);
			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
			    sizeof (crypto_mech_info_t) * mcount);
		}
	}

	/*
	 * For each mechanism supported by the provider, add the provider
	 * to the corresponding KCF mechanism mech_entry chain.
	 */
	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count; mech_idx++) {
		crypto_mech_info_t *mi = &desc->pd_mechanisms[mech_idx];

		if ((mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BITS) &&
		    (mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)) {
			err = CRYPTO_ARGUMENTS_BAD;
			break;
		}

		if ((err = kcf_add_mech_provider(mech_idx, desc, &pmd)) !=
		    KCF_SUCCESS)
			break;

		if (pmd == NULL)
			continue;

		/* The provider will be used for this mechanism */
		desc_use_count++;
	}

	/*
	 * Don't allow multiple software providers with disabled mechanisms
	 * to register. Subsequent enabling of mechanisms will result in
	 * an unsupported configuration, i.e. multiple software providers
	 * per mechanism.
	 */
	if (desc_use_count == 0 && desc->pd_prov_type == CRYPTO_SW_PROVIDER)
		return (CRYPTO_ARGUMENTS_BAD);

	if (err == KCF_SUCCESS)
		return (CRYPTO_SUCCESS);

	/*
	 * An error occurred while adding the mechanism, cleanup
	 * and bail.
	 */
	for (cleanup_idx = 0; cleanup_idx < mech_idx; cleanup_idx++) {
		kcf_remove_mech_provider(
		    desc->pd_mechanisms[cleanup_idx].cm_mech_name, desc);
	}

	if (err == KCF_MECH_TAB_FULL)
		return (CRYPTO_HOST_MEMORY);

	return (CRYPTO_ARGUMENTS_BAD);
}
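
/*
 * Illustrative sketch only (not part of this file; the "mymod" name and
 * the AES constants are shown purely as an example of the layout): the
 * pi_mechanisms array processed above is a table of crypto_mech_info_t
 * entries such as
 *
 *	static crypto_mech_info_t mymod_mech_info_tab[] = {
 *		{ SUN_CKM_AES_CBC, AES_CBC_MECH_INFO_TYPE,
 *		    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT |
 *		    CRYPTO_FG_ENCRYPT_ATOMIC | CRYPTO_FG_DECRYPT_ATOMIC,
 *		    AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES,
 *		    CRYPTO_KEYSIZE_UNIT_IN_BYTES }
 *	};
 *
 * with pi_mech_list_count set to the number of entries. Note the check
 * above: a mechanism may declare its key size in bits or in bytes, but
 * not both.
 */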

/*
 * Update routine for the provider kstat. Only privileged users are
 * allowed to access this information, since it is sensitive and some
 * cryptographic attacks (e.g. traffic analysis) could make use of it.
 */
static int
kcf_prov_kstat_update(kstat_t *ksp, int rw)
{
	kcf_prov_stats_t *ks_data;
	kcf_provider_desc_t *pd = (kcf_provider_desc_t *)ksp->ks_private;
	int i;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ks_data = ksp->ks_data;

	if (secpolicy_sys_config(CRED(), B_TRUE) != 0) {
		ks_data->ps_ops_total.value.ui64 = 0;
		ks_data->ps_ops_passed.value.ui64 = 0;
		ks_data->ps_ops_failed.value.ui64 = 0;
		ks_data->ps_ops_busy_rval.value.ui64 = 0;
	} else {
		uint64_t dtotal, ftotal, btotal;

		dtotal = ftotal = btotal = 0;
		/* No locking done since an exact count is not required. */
		for (i = 0; i < pd->pd_nbins; i++) {
			dtotal += pd->pd_percpu_bins[i].kp_ndispatches;
			ftotal += pd->pd_percpu_bins[i].kp_nfails;
			btotal += pd->pd_percpu_bins[i].kp_nbusy_rval;
		}

		ks_data->ps_ops_total.value.ui64 = dtotal;
		ks_data->ps_ops_failed.value.ui64 = ftotal;
		ks_data->ps_ops_busy_rval.value.ui64 = btotal;
		ks_data->ps_ops_passed.value.ui64 = dtotal - ftotal - btotal;
	}

	return (0);
}
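
/*
 * Illustrative sketch only (not part of this file): the per-provider
 * statistics published above can be read from userland with libkstat.
 * Assuming a software provider named "md5", whose kstat would be named
 * "md5_provider_stats" per the snprintf() in crypto_register_provider():
 *
 *	#include <kstat.h>
 *
 *	kstat_ctl_t *kc = kstat_open();
 *	kstat_t *ksp = kstat_lookup(kc, "kcf", 0, "md5_provider_stats");
 *	kstat_named_t *kn;
 *
 *	if (ksp != NULL && kstat_read(kc, ksp, NULL) != -1 &&
 *	    (kn = kstat_data_lookup(ksp, "kcf_ops_total")) != NULL)
 *		(void) printf("%llu ops\n", kn->value.ui64);
 *	(void) kstat_close(kc);
 *
 * Note that kcf_prov_kstat_update() zeroes the counters for callers
 * lacking sys_config privilege.
 */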


/*
 * Utility routine called from failure paths in crypto_register_provider()
 * and from crypto_load_soft_disabled().
 */
void
undo_register_provider(kcf_provider_desc_t *desc, boolean_t remove_prov)
{
	uint_t mech_idx;

	/* remove the provider from the mechanisms tables */
	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
	    mech_idx++) {
		kcf_remove_mech_provider(
		    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
	}

	/* remove provider from providers table */
	if (remove_prov)
		(void) kcf_prov_tab_rem_provider(desc->pd_prov_id);
}

/*
 * Utility routine called from crypto_load_soft_disabled(). Callers
 * should have done a prior undo_register_provider().
 */
void
redo_register_provider(kcf_provider_desc_t *pd)
{
	/* process the mechanisms supported by the provider */
	(void) init_prov_mechs(NULL, pd);

	/*
	 * Hold provider in providers table. We should not call
	 * kcf_prov_tab_add_provider() here as the provider descriptor
	 * is still valid which means it has an entry in the provider
	 * table.
	 */
	KCF_PROV_REFHOLD(pd);
}

/*
 * Add provider (p1) to another provider's array of providers (p2).
 * Hardware and logical providers use this array to cross-reference
 * each other.
 */
static void
add_provider_to_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
{
	kcf_provider_list_t *new;

	new = kmem_alloc(sizeof (kcf_provider_list_t), KM_SLEEP);
	mutex_enter(&p2->pd_lock);
	new->pl_next = p2->pd_provider_list;
	p2->pd_provider_list = new;
	new->pl_provider = p1;
	mutex_exit(&p2->pd_lock);
}

/*
 * Remove provider (p1) from another provider's array of providers (p2).
 * Hardware and logical providers use this array to cross-reference
 * each other.
 */
static void
remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
{

	kcf_provider_list_t *pl = NULL, **prev;

	mutex_enter(&p2->pd_lock);
	for (pl = p2->pd_provider_list, prev = &p2->pd_provider_list;
	    pl != NULL; prev = &pl->pl_next, pl = pl->pl_next) {
		if (pl->pl_provider == p1) {
			break;
		}
	}

	if (pl == NULL) {
		mutex_exit(&p2->pd_lock);
		return;
	}

	/* detach and free kcf_provider_list structure */
	*prev = pl->pl_next;
	kmem_free(pl, sizeof (*pl));
	mutex_exit(&p2->pd_lock);
}

/*
 * Convert an array of logical provider handles (crypto_provider_id)
 * stored in a crypto_provider_info structure into an array of provider
 * descriptors (kcf_provider_desc_t) attached to a logical provider.
 */
static void
process_logical_providers(crypto_provider_info_t *info, kcf_provider_desc_t *hp)
{
	kcf_provider_desc_t *lp;
	crypto_provider_id_t handle;
	int count = info->pi_logical_provider_count;
	int i;

	/* add hardware provider to each logical provider */
	for (i = 0; i < count; i++) {
		handle = info->pi_logical_providers[i];
		lp = kcf_prov_tab_lookup((crypto_provider_id_t)handle);
		if (lp == NULL) {
			continue;
		}
		add_provider_to_array(hp, lp);
		hp->pd_flags |= KCF_LPROV_MEMBER;

		/*
		 * A hardware provider has to have the provider descriptor of
		 * every logical provider it belongs to, so it can be removed
		 * from the logical provider if the hardware provider
		 * unregisters from the framework.
		 */
		add_provider_to_array(lp, hp);
		KCF_PROV_REFRELE(lp);
	}
}
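
/*
 * Illustrative sketch only (not part of this file; the "xx" names are
 * hypothetical): a hardware provider joins one or more logical providers
 * by listing their registration handles in its crypto_provider_info_t
 * before calling crypto_register_provider(), e.g.
 *
 *	info->pi_logical_provider_count = 1;
 *	info->pi_logical_providers = &xx_logical_prov_handle;
 *
 * where xx_logical_prov_handle is the handle that was returned when the
 * logical provider itself registered. process_logical_providers() then
 * cross-links the two descriptors as shown above.
 */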

/*
 * This routine removes a provider from all of the logical or
 * hardware providers it belongs to, and frees the provider's
 * array of pointers to providers.
 */
static void
remove_provider(kcf_provider_desc_t *pp)
{
	kcf_provider_desc_t *p;
	kcf_provider_list_t *e, *next;

	mutex_enter(&pp->pd_lock);
	for (e = pp->pd_provider_list; e != NULL; e = next) {
		p = e->pl_provider;
		remove_provider_from_array(pp, p);
		if (p->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    p->pd_provider_list == NULL)
			p->pd_flags &= ~KCF_LPROV_MEMBER;
		next = e->pl_next;
		kmem_free(e, sizeof (*e));
	}
	pp->pd_provider_list = NULL;
	mutex_exit(&pp->pd_lock);
}

/*
 * Dispatch events as needed for a provider. is_added flag tells
 * whether the provider is registering or unregistering.
 */
void
kcf_do_notify(kcf_provider_desc_t *prov_desc, boolean_t is_added)
{
	int i;
	crypto_notify_event_change_t ec;

	ASSERT(prov_desc->pd_state > KCF_PROV_VERIFICATION_FAILED);

	/*
	 * Inform interested clients of the mechanisms becoming
	 * available/unavailable. We skip this for logical providers
	 * as they do not affect mechanisms.
	 */
	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		ec.ec_provider_type = prov_desc->pd_prov_type;
		ec.ec_change = is_added ? CRYPTO_MECH_ADDED :
		    CRYPTO_MECH_REMOVED;
		for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
			/* Skip any mechanisms not allowed by the policy */
			if (is_mech_disabled(prov_desc,
			    prov_desc->pd_mechanisms[i].cm_mech_name))
				continue;

			(void) strncpy(ec.ec_mech_name,
			    prov_desc->pd_mechanisms[i].cm_mech_name,
			    CRYPTO_MAX_MECH_NAME);
			kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED, &ec);
		}

	}

	/*
	 * Inform interested clients about the new or departing provider.
	 * In case of a logical provider, we need to send the event only
	 * for the logical provider and not for the underlying
	 * providers, which are identified by the KCF_LPROV_MEMBER bit.
	 */
	if (prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER ||
	    (prov_desc->pd_flags & KCF_LPROV_MEMBER) == 0) {
		kcf_walk_ntfylist(is_added ? CRYPTO_EVENT_PROVIDER_REGISTERED :
		    CRYPTO_EVENT_PROVIDER_UNREGISTERED, prov_desc);
	}
}
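
/*
 * Illustrative sketch only (not part of this file): kernel consumers
 * interested in the events dispatched above can register a callback
 * through the crypto_notify_events() consumer interface; the callback
 * and mask below are hypothetical.
 *
 *	static void
 *	xx_event_cb(uint32_t event, void *event_arg)
 *	{
 *		if (event == CRYPTO_EVENT_MECHS_CHANGED) {
 *			crypto_notify_event_change_t *ec = event_arg;
 *
 *			xx_reeval_mech(ec->ec_mech_name, ec->ec_change);
 *		}
 *	}
 *
 *	notify_handle = crypto_notify_events(xx_event_cb,
 *	    CRYPTO_EVENT_MECHS_CHANGED | CRYPTO_EVENT_PROVIDER_REGISTERED);
 */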

static void
delete_kstat(kcf_provider_desc_t *desc)
{
	/* destroy the kstat created for this provider */
	if (desc->pd_kstat != NULL) {
		kcf_provider_desc_t *kspd = desc->pd_kstat->ks_private;

		/* release reference held by desc->pd_kstat->ks_private */
		ASSERT(desc == kspd);
		kstat_delete(kspd->pd_kstat);
		desc->pd_kstat = NULL;
		KCF_PROV_REFRELE(kspd);
	}
}