/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

/*
 * This file is part of the core Kernel Cryptographic Framework.
 * It implements the SPI functions exported to cryptographic
 * providers.
 */

#include <sys/ksynch.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/spi.h>
#include <sys/taskq.h>
#include <sys/disp.h>
#include <sys/kstat.h>
#include <sys/policy.h>

/*
 * minalloc and maxalloc values to be used for taskq_create().
 */
int crypto_taskq_minalloc = CRYPTO_TASKQ_MIN;
int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX;

static void free_provider_list(kcf_provider_list_t *);
static void remove_provider(kcf_provider_desc_t *);
static void process_logical_providers(crypto_provider_info_t *,
    kcf_provider_desc_t *);
static int init_prov_mechs(crypto_provider_info_t *, kcf_provider_desc_t *);
static int kcf_prov_kstat_update(kstat_t *, int);

static kcf_prov_stats_t kcf_stats_ks_data_template = {
        { "kcf_ops_total", KSTAT_DATA_UINT64 },
        { "kcf_ops_passed", KSTAT_DATA_UINT64 },
        { "kcf_ops_failed", KSTAT_DATA_UINT64 },
        { "kcf_ops_returned_busy", KSTAT_DATA_UINT64 }
};

#define KCF_SPI_COPY_OPS(src, dst, ops) if ((src)->ops != NULL) \
        *((dst)->ops) = *((src)->ops);

/*
 * Copy an ops vector from src to dst. Used during provider registration
 * to copy the ops vector from the provider info structure to the
 * provider descriptor maintained by KCF.
 * Copying the ops vector specified by the provider is needed since the
 * framework does not require the provider info structure to be
 * persistent.
 */
static void
copy_ops_vector_v1(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_control_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_cipher_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mac_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_sign_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_verify_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_cipher_mac_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_random_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_session_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_object_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_key_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_provider_ops);
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_ctx_ops);
}

static void
copy_ops_vector_v2(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops);
}

static void
copy_ops_vector_v3(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
        KCF_SPI_COPY_OPS(src_ops, dst_ops, co_nostore_key_ops);
}

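/*
 * Illustrative sketch (an assumption for this commentary, not part of the
 * framework interfaces): a provider typically defines static op tables only
 * for the operation groups it supports, leaves the remaining crypto_ops_t
 * members NULL, and passes the vector in pi_ops_vector at registration time.
 * The copy_ops_vector_v*() routines above then copy only the non-NULL
 * members into the KCF descriptor. The "dummy_*" names below are
 * hypothetical; the dummy_*_ops tables would be the provider's own
 * crypto_*_ops_t structures.
 *
 *        static crypto_ops_t dummy_crypto_ops;
 *
 *        dummy_crypto_ops.co_digest_ops = &dummy_digest_ops;
 *        dummy_crypto_ops.co_cipher_ops = &dummy_cipher_ops;
 *        dummy_crypto_ops.co_ctx_ops = &dummy_ctx_ops;
 */
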
/*
 * This routine is used to add cryptographic providers to the KCF framework.
 * Providers pass a crypto_provider_info structure to crypto_register_provider()
 * and get back a handle. The crypto_provider_info structure contains a
 * list of mechanisms supported by the provider and an ops vector containing
 * provider entry points. Hardware providers call this routine in their attach
 * routines. Software providers call this routine in their _init() routine.
 */
int
crypto_register_provider(crypto_provider_info_t *info,
    crypto_kcf_provider_handle_t *handle)
{
        int i;
        int vstatus = 0;
        struct modctl *mcp;
        char *name;
        char ks_name[KSTAT_STRLEN];
        crypto_notify_event_change_t ec;

        kcf_provider_desc_t *prov_desc = NULL;
        int ret = CRYPTO_ARGUMENTS_BAD;

        if (info->pi_interface_version > CRYPTO_SPI_VERSION_3)
                return (CRYPTO_VERSION_MISMATCH);

        /*
         * Check provider type, must be software, hardware, or logical.
         */
        if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
            info->pi_provider_type != CRYPTO_SW_PROVIDER &&
            info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
                return (CRYPTO_ARGUMENTS_BAD);

        /*
         * Allocate and initialize a new provider descriptor. We also
         * hold it and release it when done.
         */
        prov_desc = kcf_alloc_provider_desc(info);
        KCF_PROV_REFHOLD(prov_desc);

        prov_desc->pd_prov_type = info->pi_provider_type;

        /* provider-private handle, opaque to KCF */
        prov_desc->pd_prov_handle = info->pi_provider_handle;

        /* copy provider description string */
        if (info->pi_provider_description != NULL) {
                /*
                 * pi_provider_description is a string that can contain
                 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
                 * INCLUDING the terminating null character. A bcopy()
                 * is necessary here as pd_description should not have
                 * a null character. See comments in kcf_alloc_provider_desc()
                 * for details on the pd_description field.
                 */
                bcopy(info->pi_provider_description, prov_desc->pd_description,
                    min(strlen(info->pi_provider_description),
                    CRYPTO_PROVIDER_DESCR_MAX_LEN));
        }

        if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
                if (info->pi_ops_vector == NULL) {
                        goto bail;
                }
                copy_ops_vector_v1(info->pi_ops_vector,
                    prov_desc->pd_ops_vector);
                if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
                        copy_ops_vector_v2(info->pi_ops_vector,
                            prov_desc->pd_ops_vector);
                        prov_desc->pd_flags = info->pi_flags;
                }
                if (info->pi_interface_version == CRYPTO_SPI_VERSION_3) {
                        copy_ops_vector_v3(info->pi_ops_vector,
                            prov_desc->pd_ops_vector);
                }
        }

        /* object_ops and nostore_key_ops are mutually exclusive */
        if (prov_desc->pd_ops_vector->co_object_ops &&
            prov_desc->pd_ops_vector->co_nostore_key_ops) {
                goto bail;
        }

        /*
         * For software providers, copy the module name and module ID.
         * For hardware providers, copy the driver name and instance.
         */
        switch (info->pi_provider_type) {
        case CRYPTO_SW_PROVIDER:
                if (info->pi_provider_dev.pd_sw == NULL)
                        goto bail;

                if ((mcp = mod_getctl(info->pi_provider_dev.pd_sw)) == NULL)
                        goto bail;

                prov_desc->pd_module_id = mcp->mod_id;
                name = mcp->mod_modname;
                break;

        case CRYPTO_HW_PROVIDER:
        case CRYPTO_LOGICAL_PROVIDER:
                if (info->pi_provider_dev.pd_hw == NULL)
                        goto bail;

                prov_desc->pd_instance =
                    ddi_get_instance(info->pi_provider_dev.pd_hw);
                name = (char *)ddi_driver_name(info->pi_provider_dev.pd_hw);
                break;
        }
        if (name == NULL)
                goto bail;

        prov_desc->pd_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
        (void) strcpy(prov_desc->pd_name, name);

        if ((prov_desc->pd_mctlp = kcf_get_modctl(info)) == NULL)
                goto bail;

        /* process the mechanisms supported by the provider */
        if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
                goto bail;

        /*
         * Add the provider to the providers table. This also sets the
         * pd_prov_id field of the descriptor.
         */
        if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
                undo_register_provider(prov_desc, B_FALSE);
                goto bail;
        }

        if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
                if ((vstatus = kcf_verify_signature(prov_desc)) ==
                    CRYPTO_MODVERIFICATION_FAILED) {
                        undo_register_provider(prov_desc, B_TRUE);
                        ret = CRYPTO_MODVERIFICATION_FAILED;
                        goto bail;
                }
        }

        /*
         * We create a taskq only for a hardware provider. The global
         * software queue is used for software providers. The taskq
         * is limited to one thread since tasks are guaranteed to be
         * executed in the order they are scheduled, if nthreads == 1. We
         * pass the TASKQ_PREPOPULATE flag to keep some entries cached to
         * improve performance.
         */
        if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
                prov_desc->pd_sched_info.ks_taskq = taskq_create("kcf_taskq",
                    1, minclsyspri, crypto_taskq_minalloc,
                    crypto_taskq_maxalloc, TASKQ_PREPOPULATE);
        else
                prov_desc->pd_sched_info.ks_taskq = NULL;

        /* no kernel session for logical providers */
        if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
                /*
                 * Open a session for session-oriented providers. This session
                 * is used for all kernel consumers. This is fine as a provider
                 * is required to support multiple thread access to a session.
                 * We can do this only after the taskq has been created as we
                 * do a kcf_submit_request() to open the session.
                 */
                if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
                        kcf_req_params_t params;

                        KCF_WRAP_SESSION_OPS_PARAMS(&params,
                            KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
                            CRYPTO_USER, NULL, 0, prov_desc);
                        ret = kcf_submit_request(prov_desc, NULL, NULL, &params,
                            B_FALSE);

                        if (ret != CRYPTO_SUCCESS) {
                                undo_register_provider(prov_desc, B_TRUE);
                                ret = CRYPTO_FAILED;
                                goto bail;
                        }
                }
        }

        if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
                /*
                 * Create the kstat for this provider. There is a kstat
                 * installed for each successfully registered provider.
                 * This kstat is deleted when the provider unregisters.
                 */
                if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
                        (void) snprintf(ks_name, KSTAT_STRLEN, "%s_%s",
                            prov_desc->pd_name, "provider_stats");
                } else {
                        (void) snprintf(ks_name, KSTAT_STRLEN, "%s_%d_%u_%s",
                            prov_desc->pd_name, prov_desc->pd_instance,
                            prov_desc->pd_prov_id, "provider_stats");
                }

                prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
                    KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
                    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

                if (prov_desc->pd_kstat != NULL) {
                        bcopy(&kcf_stats_ks_data_template,
                            &prov_desc->pd_ks_data,
                            sizeof (kcf_stats_ks_data_template));
                        prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
                        KCF_PROV_REFHOLD(prov_desc);
                        KCF_PROV_IREFHOLD(prov_desc);
                        prov_desc->pd_kstat->ks_private = prov_desc;
                        prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
                        kstat_install(prov_desc->pd_kstat);
                }
        }

        if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
                process_logical_providers(info, prov_desc);

        /*
         * Inform interested clients of the mechanisms becoming
         * available. We skip this for logical providers as they
         * do not affect mechanisms.
         */
        if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
                ec.ec_provider_type = prov_desc->pd_prov_type;
                ec.ec_change = CRYPTO_MECH_ADDED;
                for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
                        /* Skip any mechanisms not allowed by the policy */
                        if (is_mech_disabled(prov_desc,
                            prov_desc->pd_mechanisms[i].cm_mech_name))
                                continue;

                        (void) strncpy(ec.ec_mech_name,
                            prov_desc->pd_mechanisms[i].cm_mech_name,
                            CRYPTO_MAX_MECH_NAME);
                        kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED, &ec);
                }
        }

        /*
         * Inform interested clients of the new provider. In the case of a
         * logical provider, we need to send the notification only for the
         * logical provider and not for the underlying providers, which are
         * identified by pi_logical_provider_count > 0.
         */
        if (prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER ||
            info->pi_logical_provider_count == 0)
                kcf_walk_ntfylist(CRYPTO_EVENT_PROVIDER_REGISTERED, prov_desc);

        mutex_enter(&prov_desc->pd_lock);
        prov_desc->pd_state = (vstatus == 0) ? KCF_PROV_READY :
            KCF_PROV_UNVERIFIED;
        mutex_exit(&prov_desc->pd_lock);

        *handle = prov_desc->pd_kcf_prov_handle;
        KCF_PROV_REFRELE(prov_desc);
        return (CRYPTO_SUCCESS);

bail:
        KCF_PROV_REFRELE(prov_desc);
        return (ret);
}

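/*
 * Illustrative sketch (an assumption, not code from this file): a software
 * provider would typically register from its _init() entry point, after
 * mod_install(), and keep the returned handle for unregistration at _fini()
 * time. The "dummy_*" names are hypothetical; dummy_prov_info would be a
 * static crypto_provider_info_t initialized with the interface version,
 * description, provider type, ops vector and mechanism table, following the
 * member layout defined in sys/crypto/spi.h.
 *
 *        static crypto_kcf_provider_handle_t dummy_prov_handle;
 *        static crypto_provider_info_t dummy_prov_info;
 *
 *        int
 *        _init(void)
 *        {
 *                int ret;
 *
 *                if ((ret = mod_install(&modlinkage)) != 0)
 *                        return (ret);
 *
 *                if (crypto_register_provider(&dummy_prov_info,
 *                    &dummy_prov_handle) != CRYPTO_SUCCESS) {
 *                        (void) mod_remove(&modlinkage);
 *                        return (EACCES);
 *                }
 *
 *                return (0);
 *        }
 */
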
/*
 * This routine is used to notify the framework when a provider is being
 * removed. Hardware providers call this routine in their detach routines.
 * Software providers call this routine in their _fini() routine.
 */
int
crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
{
        int i;
        uint_t mech_idx;
        kcf_provider_desc_t *desc;
        crypto_notify_event_change_t ec;
        kcf_prov_state_t saved_state;

        /* lookup provider descriptor */
        if ((desc = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
                return (CRYPTO_UNKNOWN_PROVIDER);

        mutex_enter(&desc->pd_lock);
        /*
         * Check if any other thread is disabling or removing
         * this provider. We return if this is the case.
         */
        if (desc->pd_state >= KCF_PROV_DISABLED) {
                mutex_exit(&desc->pd_lock);
                /* Release reference held by kcf_prov_tab_lookup(). */
                KCF_PROV_REFRELE(desc);
                return (CRYPTO_BUSY);
        }

        saved_state = desc->pd_state;
        desc->pd_state = KCF_PROV_REMOVED;

        if (saved_state == KCF_PROV_BUSY) {
                /*
                 * The per-provider taskq thread may be waiting. We
                 * signal it so that it can start failing requests.
                 * Note that we do not need a cv_broadcast() as we keep
                 * only a single thread per taskq.
                 */
                cv_signal(&desc->pd_resume_cv);
        }

        if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
                /*
                 * Check if this provider is currently being used.
                 * pd_irefcnt is the number of holds from the internal
                 * structures. We add one to account for the above lookup.
                 */
                if (desc->pd_refcnt > desc->pd_irefcnt + 1) {
                        desc->pd_state = saved_state;
                        mutex_exit(&desc->pd_lock);
                        /* Release reference held by kcf_prov_tab_lookup(). */
                        KCF_PROV_REFRELE(desc);
                        /*
                         * On getting the busy return value, the
                         * administrator will presumably stop the clients,
                         * which removes the holds. Any retry will then
                         * succeed.
                         */
                        return (CRYPTO_BUSY);
                }
        }
        mutex_exit(&desc->pd_lock);

        if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
                remove_provider(desc);
        }

        if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
                /* remove the provider from the mechanisms tables */
                for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
                    mech_idx++) {
                        kcf_remove_mech_provider(
                            desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
                }
        }

        /* remove provider from providers table */
        if (kcf_prov_tab_rem_provider((crypto_provider_id_t)handle) !=
            CRYPTO_SUCCESS) {
                /* Release reference held by kcf_prov_tab_lookup(). */
                KCF_PROV_REFRELE(desc);
                return (CRYPTO_UNKNOWN_PROVIDER);
        }

        /* destroy the kstat created for this provider */
        if (desc->pd_kstat != NULL) {
                kcf_provider_desc_t *kspd = desc->pd_kstat->ks_private;

                /* release reference held by desc->pd_kstat->ks_private */
                ASSERT(desc == kspd);
                kstat_delete(kspd->pd_kstat);
                KCF_PROV_REFRELE(kspd);
                KCF_PROV_IREFRELE(kspd);
        }

        if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
                /* Release reference held by kcf_prov_tab_lookup(). */
                KCF_PROV_REFRELE(desc);

                /*
                 * Wait till the existing requests complete.
                 */
                mutex_enter(&desc->pd_lock);
                while (desc->pd_state != KCF_PROV_FREED)
                        cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
                mutex_exit(&desc->pd_lock);
        } else {
                /*
                 * Wait until requests that have been sent to the provider
                 * complete.
                 */
                mutex_enter(&desc->pd_lock);
                while (desc->pd_irefcnt > 0)
                        cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
                mutex_exit(&desc->pd_lock);
        }

        /*
         * Inform interested clients of the mechanisms becoming
         * unavailable. We skip this for logical providers as they
         * do not affect mechanisms.
         */
        if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
                ec.ec_provider_type = desc->pd_prov_type;
                ec.ec_change = CRYPTO_MECH_REMOVED;
                for (i = 0; i < desc->pd_mech_list_count; i++) {
                        /* Skip any mechanisms not allowed by the policy */
                        if (is_mech_disabled(desc,
                            desc->pd_mechanisms[i].cm_mech_name))
                                continue;

                        (void) strncpy(ec.ec_mech_name,
                            desc->pd_mechanisms[i].cm_mech_name,
                            CRYPTO_MAX_MECH_NAME);
                        kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED, &ec);
                }
        }

        /*
         * Inform interested clients about the departing provider.
         * In the case of a logical provider, we need to send the
         * notification only for the logical provider and not for the
         * underlying providers, which are identified by the
         * KCF_LPROV_MEMBER bit.
         */
        if (desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER ||
            (desc->pd_flags & KCF_LPROV_MEMBER) == 0)
                kcf_walk_ntfylist(CRYPTO_EVENT_PROVIDER_UNREGISTERED, desc);

        if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
                /*
                 * This is the only place where kcf_free_provider_desc()
                 * is called directly. KCF_PROV_REFRELE() should free the
                 * structure in all other places.
                 */
                ASSERT(desc->pd_state == KCF_PROV_FREED &&
                    desc->pd_refcnt == 0);
                kcf_free_provider_desc(desc);
        } else {
                KCF_PROV_REFRELE(desc);
        }

        return (CRYPTO_SUCCESS);
}

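/*
 * Illustrative sketch (an assumption, not code from this file): a software
 * provider typically unregisters from _fini() and returns EBUSY if the
 * provider cannot be unregistered yet (for example, because kernel
 * consumers still hold references to it), so the module is not unloaded
 * prematurely. The "dummy_prov_handle" name is hypothetical.
 *
 *        int
 *        _fini(void)
 *        {
 *                if (crypto_unregister_provider(dummy_prov_handle) !=
 *                    CRYPTO_SUCCESS)
 *                        return (EBUSY);
 *
 *                return (mod_remove(&modlinkage));
 *        }
 */
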
/*
 * This routine is used to notify the framework that the state of
 * a cryptographic provider has changed. Valid state codes are:
 *
 * CRYPTO_PROVIDER_READY
 *        The provider indicates that it can process more requests. A provider
 *        sends this notification if it has previously notified the framework
 *        with a CRYPTO_PROVIDER_BUSY.
 *
 * CRYPTO_PROVIDER_BUSY
 *        The provider cannot take more requests.
 *
 * CRYPTO_PROVIDER_FAILED
 *        The provider encountered an internal error. The framework will not
 *        be sending any more requests to the provider. The provider may notify
 *        with a CRYPTO_PROVIDER_READY, if it is able to recover from the error.
 *
 * This routine can be called from user or interrupt context.
 */
void
crypto_provider_notification(crypto_kcf_provider_handle_t handle, uint_t state)
{
        kcf_provider_desc_t *pd;

        /* lookup the provider from the given handle */
        if ((pd = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
                return;

        mutex_enter(&pd->pd_lock);

        if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
                cmn_err(CE_WARN, "crypto_provider_notification: "
                    "logical provider (%x) ignored\n", handle);
                goto out;
        }
        switch (state) {
        case CRYPTO_PROVIDER_READY:
                switch (pd->pd_state) {
                case KCF_PROV_BUSY:
                        pd->pd_state = KCF_PROV_READY;
                        /*
                         * Signal the per-provider taskq thread that it
                         * can start submitting requests. Note that we do
                         * not need a cv_broadcast() as we keep only a
                         * single thread per taskq.
                         */
                        cv_signal(&pd->pd_resume_cv);
                        break;

                case KCF_PROV_FAILED:
                        /*
                         * The provider recovered from the error. Let us
                         * use it now.
                         */
                        pd->pd_state = KCF_PROV_READY;
                        break;
                }
                break;

        case CRYPTO_PROVIDER_BUSY:
                switch (pd->pd_state) {
                case KCF_PROV_READY:
                        pd->pd_state = KCF_PROV_BUSY;
                        break;
                }
                break;

        case CRYPTO_PROVIDER_FAILED:
                /*
                 * We note the failure and return. The per-provider taskq
                 * thread checks this flag and starts failing the
                 * requests if it is set. See process_req_hwp() for details.
                 */
                switch (pd->pd_state) {
                case KCF_PROV_READY:
                        pd->pd_state = KCF_PROV_FAILED;
                        break;

                case KCF_PROV_BUSY:
                        pd->pd_state = KCF_PROV_FAILED;
                        /*
                         * The per-provider taskq thread may be waiting. We
                         * signal it so that it can start failing requests.
                         */
                        cv_signal(&pd->pd_resume_cv);
                        break;
                }
                break;
        }
out:
        mutex_exit(&pd->pd_lock);
        KCF_PROV_REFRELE(pd);
}

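/*
 * Illustrative sketch (an assumption, not code from this file): a hardware
 * provider with a bounded command queue might throttle the framework around
 * its queue high and low water marks. The "sc_*" soft-state fields are
 * hypothetical.
 *
 *        if (sc->sc_njobs >= sc->sc_hiwater)
 *                crypto_provider_notification(sc->sc_prov_handle,
 *                    CRYPTO_PROVIDER_BUSY);
 *
 * and later, when the queue drains:
 *
 *        if (sc->sc_njobs <= sc->sc_lowater)
 *                crypto_provider_notification(sc->sc_prov_handle,
 *                    CRYPTO_PROVIDER_READY);
 */
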
/*
 * This routine is used to notify the framework of the result of
 * an asynchronous request handled by a provider. Valid error
 * codes are the same as the CRYPTO_* errors defined in common.h.
 *
 * This routine can be called from user or interrupt context.
 */
void
crypto_op_notification(crypto_req_handle_t handle, int error)
{
        kcf_call_type_t ctype;

        if ((ctype = GET_REQ_TYPE(handle)) == CRYPTO_SYNCH) {
                kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)handle;

                if (error != CRYPTO_SUCCESS)
                        sreq->sn_provider->pd_sched_info.ks_nfails++;
                KCF_PROV_IREFRELE(sreq->sn_provider);
                kcf_sop_done(sreq, error);
        } else {
                kcf_areq_node_t *areq = (kcf_areq_node_t *)handle;

                ASSERT(ctype == CRYPTO_ASYNCH);
                if (error != CRYPTO_SUCCESS)
                        areq->an_provider->pd_sched_info.ks_nfails++;
                KCF_PROV_IREFRELE(areq->an_provider);
                kcf_aop_done(areq, error);
        }
}

/*
 * This routine is used by software providers to determine
 * whether to use KM_SLEEP or KM_NOSLEEP during memory allocation.
 * Note that hardware providers can always use KM_SLEEP. So,
 * they do not need to call this routine.
 *
 * This routine can be called from user or interrupt context.
 */
int
crypto_kmflag(crypto_req_handle_t handle)
{
        return (REQHNDL2_KMFLAG(handle));
}

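/*
 * Illustrative sketch (an assumption, not code from this file): a provider
 * that completes requests asynchronously saves the crypto_req_handle_t it
 * was given and calls crypto_op_notification() from its completion path,
 * while a software provider picks its kmem flag with crypto_kmflag() when
 * allocating on behalf of a request. The "req" and "job" variables are
 * hypothetical.
 *
 *        buf = kmem_alloc(len, crypto_kmflag(req));
 *
 *        ...
 *
 *        crypto_op_notification(job->j_kcf_req,
 *            job->j_error == 0 ? CRYPTO_SUCCESS : CRYPTO_FAILED);
 */
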
/*
 * Process the mechanism info structures specified by the provider
 * during registration. A NULL crypto_provider_info_t indicates
 * an already initialized provider descriptor.
 *
 * Mechanisms are not added to the kernel's mechanism table if the
 * provider is a logical provider.
 *
 * Returns CRYPTO_SUCCESS on success, CRYPTO_ARGUMENTS_BAD if one
 * of the specified mechanisms was malformed, or CRYPTO_HOST_MEMORY
 * if the table of mechanisms is full.
 */
static int
init_prov_mechs(crypto_provider_info_t *info, kcf_provider_desc_t *desc)
{
        uint_t mech_idx;
        uint_t cleanup_idx;
        int err = CRYPTO_SUCCESS;
        kcf_prov_mech_desc_t *pmd;
        int desc_use_count = 0;
        int mcount = desc->pd_mech_list_count;

        if (desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
                if (info != NULL) {
                        ASSERT(info->pi_mechanisms != NULL);
                        bcopy(info->pi_mechanisms, desc->pd_mechanisms,
                            sizeof (crypto_mech_info_t) * mcount);
                }
                return (CRYPTO_SUCCESS);
        }

        /*
         * Copy the mechanism list from the provider info to the provider
         * descriptor. desc->pd_mechanisms has an extra crypto_mech_info_t
         * element if the provider has random_ops since we keep an internal
         * mechanism, SUN_RANDOM, in this case.
         */
        if (info != NULL) {
                if (info->pi_ops_vector->co_random_ops != NULL) {
                        crypto_mech_info_t *rand_mi;

                        /*
                         * Need the following check as it is possible to have
                         * a provider that implements just random_ops and has
                         * pi_mechanisms == NULL.
                         */
                        if (info->pi_mechanisms != NULL) {
                                bcopy(info->pi_mechanisms, desc->pd_mechanisms,
                                    sizeof (crypto_mech_info_t) * (mcount - 1));
                        }
                        rand_mi = &desc->pd_mechanisms[mcount - 1];

                        bzero(rand_mi, sizeof (crypto_mech_info_t));
                        (void) strncpy(rand_mi->cm_mech_name, SUN_RANDOM,
                            CRYPTO_MAX_MECH_NAME);
                        rand_mi->cm_func_group_mask = CRYPTO_FG_RANDOM;
                } else {
                        ASSERT(info->pi_mechanisms != NULL);
                        bcopy(info->pi_mechanisms, desc->pd_mechanisms,
                            sizeof (crypto_mech_info_t) * mcount);
                }
        }

        /*
         * For each mechanism supported by the provider, add the provider
         * to the corresponding KCF mechanism mech_entry chain.
         */
        for (mech_idx = 0; mech_idx < desc->pd_mech_list_count; mech_idx++) {
                crypto_mech_info_t *mi = &desc->pd_mechanisms[mech_idx];

                if ((mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BITS) &&
                    (mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)) {
                        err = CRYPTO_ARGUMENTS_BAD;
                        break;
                }

                if (desc->pd_flags & CRYPTO_HASH_NO_UPDATE &&
                    mi->cm_func_group_mask & CRYPTO_FG_DIGEST) {
                        /*
                         * We ask the provider to specify the limit
                         * per hash mechanism. But, in practice, a
                         * hardware limitation means all hash mechanisms
                         * will have the same maximum size allowed for
                         * input data. So, we make it a per-provider
                         * limit to keep it simple.
                         */
                        if (mi->cm_max_input_length == 0) {
                                err = CRYPTO_ARGUMENTS_BAD;
                                break;
                        } else {
                                desc->pd_hash_limit = mi->cm_max_input_length;
                        }
                }

                if (kcf_add_mech_provider(mech_idx, desc, &pmd) != KCF_SUCCESS)
                        break;

                if (pmd == NULL)
                        continue;

                /* The provider will be used for this mechanism */
                desc_use_count++;
        }

        /*
         * Don't allow multiple software providers with disabled mechanisms
         * to register. Subsequent enabling of mechanisms will result in
         * an unsupported configuration, i.e. multiple software providers
         * per mechanism.
         */
        if (desc_use_count == 0 && desc->pd_prov_type == CRYPTO_SW_PROVIDER)
                return (CRYPTO_ARGUMENTS_BAD);

        if (err == KCF_SUCCESS)
                return (CRYPTO_SUCCESS);

        /*
         * An error occurred while adding a mechanism; clean up
         * and bail.
         */
        for (cleanup_idx = 0; cleanup_idx < mech_idx; cleanup_idx++) {
                kcf_remove_mech_provider(
                    desc->pd_mechanisms[cleanup_idx].cm_mech_name, desc);
        }

        if (err == KCF_MECH_TAB_FULL)
                return (CRYPTO_HOST_MEMORY);

        return (CRYPTO_ARGUMENTS_BAD);
}

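/*
 * Illustrative sketch (an assumption, not code from this file): the table a
 * provider passes in pi_mechanisms is an array of crypto_mech_info_t. Only
 * cm_mech_name and cm_func_group_mask are shown here, and the member order
 * is schematic; the elided members (mechanism number, key-size limits,
 * cm_mech_flags, and so on) follow the layout defined in sys/crypto/spi.h.
 * The mechanism choices below are hypothetical.
 *
 *        static crypto_mech_info_t dummy_mech_info_tab[] = {
 *                { "CKM_AES_CBC", ...,
 *                    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT, ... },
 *                { "CKM_SHA_1", ...,
 *                    CRYPTO_FG_DIGEST, ... }
 *        };
 */
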
/*
 * Update routine for kstat. Only privileged users are allowed to
 * access this information, since this information is sensitive.
 * There are some cryptographic attacks (e.g. traffic analysis)
 * which can use this information.
 */
static int
kcf_prov_kstat_update(kstat_t *ksp, int rw)
{
        kcf_prov_stats_t *ks_data;
        kcf_provider_desc_t *pd = (kcf_provider_desc_t *)ksp->ks_private;

        if (rw == KSTAT_WRITE)
                return (EACCES);

        ks_data = ksp->ks_data;

        if (secpolicy_sys_config(CRED(), B_TRUE) != 0) {
                ks_data->ps_ops_total.value.ui64 = 0;
                ks_data->ps_ops_passed.value.ui64 = 0;
                ks_data->ps_ops_failed.value.ui64 = 0;
                ks_data->ps_ops_busy_rval.value.ui64 = 0;
        } else {
                ks_data->ps_ops_total.value.ui64 =
                    pd->pd_sched_info.ks_ndispatches;
                ks_data->ps_ops_failed.value.ui64 =
                    pd->pd_sched_info.ks_nfails;
                ks_data->ps_ops_busy_rval.value.ui64 =
                    pd->pd_sched_info.ks_nbusy_rval;
                ks_data->ps_ops_passed.value.ui64 =
                    pd->pd_sched_info.ks_ndispatches -
                    pd->pd_sched_info.ks_nfails -
                    pd->pd_sched_info.ks_nbusy_rval;
        }

        return (0);
}

/*
 * Utility routine called from failure paths in crypto_register_provider()
 * and from crypto_load_soft_disabled().
 */
void
undo_register_provider(kcf_provider_desc_t *desc, boolean_t remove_prov)
{
        uint_t mech_idx;

        /* remove the provider from the mechanisms tables */
        for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
            mech_idx++) {
                kcf_remove_mech_provider(
                    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
        }

        /* remove provider from providers table */
        if (remove_prov)
                (void) kcf_prov_tab_rem_provider(desc->pd_prov_id);
}

/*
 * Utility routine called from crypto_load_soft_disabled(). Callers
 * should have done a prior undo_register_provider().
 */
void
redo_register_provider(kcf_provider_desc_t *pd)
{
        /* process the mechanisms supported by the provider */
        (void) init_prov_mechs(NULL, pd);

        /*
         * Hold provider in providers table. We should not call
         * kcf_prov_tab_add_provider() here as the provider descriptor
         * is still valid which means it has an entry in the provider
         * table.
         */
        KCF_PROV_REFHOLD(pd);
        KCF_PROV_IREFHOLD(pd);
}

/*
 * Add provider (p1) to another provider's array of providers (p2).
 * Hardware and logical providers use this array to cross-reference
 * each other.
 */
static void
add_provider_to_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
{
        kcf_provider_list_t *new;

        new = kmem_alloc(sizeof (kcf_provider_list_t), KM_SLEEP);
        mutex_enter(&p2->pd_lock);
        new->pl_next = p2->pd_provider_list;
        p2->pd_provider_list = new;
        KCF_PROV_IREFHOLD(p1);
        new->pl_provider = p1;
        mutex_exit(&p2->pd_lock);
}

/*
 * Remove provider (p1) from another provider's array of providers (p2).
 * Hardware and logical providers use this array to cross-reference
 * each other.
 */
static void
remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
{
        kcf_provider_list_t *pl = NULL, **prev;

        mutex_enter(&p2->pd_lock);
        for (pl = p2->pd_provider_list, prev = &p2->pd_provider_list;
            pl != NULL; prev = &pl->pl_next, pl = pl->pl_next) {
                if (pl->pl_provider == p1) {
                        break;
                }
        }

        if (pl == NULL) {
                /* p1 was not found on p2's list; nothing to remove */
                mutex_exit(&p2->pd_lock);
                return;
        }

        /* detach and free kcf_provider_list structure */
        KCF_PROV_IREFRELE(p1);
        *prev = pl->pl_next;
        kmem_free(pl, sizeof (*pl));
        mutex_exit(&p2->pd_lock);
}

/*
 * Convert an array of logical provider handles (crypto_provider_id)
 * stored in a crypto_provider_info structure into an array of provider
 * descriptors (kcf_provider_desc_t) attached to a logical provider.
 */
static void
process_logical_providers(crypto_provider_info_t *info, kcf_provider_desc_t *hp)
{
        kcf_provider_desc_t *lp;
        crypto_provider_id_t handle;
        int count = info->pi_logical_provider_count;
        int i;

        /* add hardware provider to each logical provider */
        for (i = 0; i < count; i++) {
                handle = info->pi_logical_providers[i];
                lp = kcf_prov_tab_lookup((crypto_provider_id_t)handle);
                if (lp == NULL) {
                        continue;
                }
                add_provider_to_array(hp, lp);
                hp->pd_flags |= KCF_LPROV_MEMBER;

                /*
                 * A hardware provider has to have the provider descriptor of
                 * every logical provider it belongs to, so it can be removed
                 * from the logical provider if the hardware provider
                 * unregisters from the framework.
                 */
                add_provider_to_array(lp, hp);
                KCF_PROV_REFRELE(lp);
        }
}

/*
 * This routine removes a provider from all of the logical or
 * hardware providers it belongs to, and frees the provider's
 * array of pointers to providers.
 */
static void
remove_provider(kcf_provider_desc_t *pp)
{
        kcf_provider_desc_t *p;
        kcf_provider_list_t *e, *next;

        mutex_enter(&pp->pd_lock);
        for (e = pp->pd_provider_list; e != NULL; e = next) {
                p = e->pl_provider;
                remove_provider_from_array(pp, p);
                if (p->pd_prov_type == CRYPTO_HW_PROVIDER &&
                    p->pd_provider_list == NULL)
                        p->pd_flags &= ~KCF_LPROV_MEMBER;
                KCF_PROV_IREFRELE(p);
                next = e->pl_next;
                kmem_free(e, sizeof (*e));
        }
        pp->pd_provider_list = NULL;
        mutex_exit(&pp->pd_lock);
}