/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * This file is part of the core Kernel Cryptographic Framework.
 * It implements the SPI functions exported to cryptographic
 * providers.
 */

#include <sys/ksynch.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/ioctladmin.h>
#include <sys/taskq.h>
#include <sys/disp.h>
#include <sys/kstat.h>
#include <sys/policy.h>
#include <sys/cpuvar.h>

/*
 * minalloc and maxalloc values to be used for taskq_create().
 */
int crypto_taskq_threads = CRYPTO_TASKQ_THREADS;
int crypto_taskq_minalloc = CYRPTO_TASKQ_MIN;
int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX;

static void remove_provider(kcf_provider_desc_t *);
static void process_logical_providers(crypto_provider_info_t *,
    kcf_provider_desc_t *);
static int init_prov_mechs(crypto_provider_info_t *, kcf_provider_desc_t *);
static int kcf_prov_kstat_update(kstat_t *, int);
static void undo_register_provider_extra(kcf_provider_desc_t *);
static void delete_kstat(kcf_provider_desc_t *);

static kcf_prov_stats_t kcf_stats_ks_data_template = {
	{ "kcf_ops_total",		KSTAT_DATA_UINT64 },
	{ "kcf_ops_passed",		KSTAT_DATA_UINT64 },
	{ "kcf_ops_failed",		KSTAT_DATA_UINT64 },
	{ "kcf_ops_returned_busy",	KSTAT_DATA_UINT64 }
};

#define	KCF_SPI_COPY_OPS(src, dst, ops)	if ((src)->ops != NULL) \
	*((dst)->ops) = *((src)->ops);
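
/*
 * Illustrative note: for a member such as co_digest_ops,
 * KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops) expands to
 *
 *	if ((src_ops)->co_digest_ops != NULL)
 *		*((dst_ops)->co_digest_ops) = *((src_ops)->co_digest_ops);
 *
 * i.e. the destination member is expected to already point to storage
 * (set up when the provider descriptor is allocated); only the structure
 * it points to is overwritten.
 */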

/*
 * Copy an ops vector from src to dst. Used during provider registration
 * to copy the ops vector from the provider info structure to the
 * provider descriptor maintained by KCF.
 * Copying the ops vector specified by the provider is needed since the
 * framework does not require the provider info structure to be
 * persistent.
 */
static void
copy_ops_vector_v1(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_control_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_cipher_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mac_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_sign_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_verify_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_cipher_mac_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_random_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_session_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_object_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_key_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_provider_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_ctx_ops);
}

static void
copy_ops_vector_v2(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops);
}

static void
copy_ops_vector_v3(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_nostore_key_ops);
}

static void
copy_ops_vector_v4(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_fips140_ops);
}

/*
 * This routine is used to add cryptographic providers to the KCF framework.
 * Providers pass a crypto_provider_info structure to
 * crypto_register_provider() and get back a handle. The
 * crypto_provider_info structure contains a list of mechanisms supported
 * by the provider and an ops vector containing provider entry points.
 * Hardware providers call this routine in their attach routines. Software
 * providers call this routine in their _init() routine.
 */
int
crypto_register_provider(crypto_provider_info_t *info,
    crypto_kcf_provider_handle_t *handle)
{
	int need_fips140_verify, need_verify = 1;
	struct modctl *mcp;
	char *name;
	char ks_name[KSTAT_STRLEN];
	kcf_provider_desc_t *prov_desc = NULL;
	int ret = CRYPTO_ARGUMENTS_BAD;

	if (info->pi_interface_version > CRYPTO_SPI_VERSION_4)
		return (CRYPTO_VERSION_MISMATCH);

	/*
	 * Check provider type, must be software, hardware, or logical.
	 */
	if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
	    info->pi_provider_type != CRYPTO_SW_PROVIDER &&
	    info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
		return (CRYPTO_ARGUMENTS_BAD);

	/*
	 * Allocate and initialize a new provider descriptor. We also
	 * hold it and release it when done.
	 */
	prov_desc = kcf_alloc_provider_desc(info);
	KCF_PROV_REFHOLD(prov_desc);

	prov_desc->pd_prov_type = info->pi_provider_type;

	/* provider-private handle, opaque to KCF */
	prov_desc->pd_prov_handle = info->pi_provider_handle;

	/* copy provider description string */
	if (info->pi_provider_description != NULL) {
		/*
		 * pi_provider_description is a string that can contain
		 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
		 * INCLUDING the terminating null character. A bcopy()
		 * is necessary here as pd_description should not have
		 * a null character. See comments in kcf_alloc_provider_desc()
		 * for details on the pd_description field.
		 */
		bcopy(info->pi_provider_description, prov_desc->pd_description,
		    min(strlen(info->pi_provider_description),
		    CRYPTO_PROVIDER_DESCR_MAX_LEN));
	}

	if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
		if (info->pi_ops_vector == NULL) {
			goto bail;
		}
		copy_ops_vector_v1(info->pi_ops_vector,
		    prov_desc->pd_ops_vector);
		if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
			copy_ops_vector_v2(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
			prov_desc->pd_flags = info->pi_flags;
		}
		if (info->pi_interface_version >= CRYPTO_SPI_VERSION_3) {
			copy_ops_vector_v3(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
		}
		if (info->pi_interface_version == CRYPTO_SPI_VERSION_4) {
			copy_ops_vector_v4(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
		}
	}

	/* object_ops and nostore_key_ops are mutually exclusive */
	if (prov_desc->pd_ops_vector->co_object_ops &&
	    prov_desc->pd_ops_vector->co_nostore_key_ops) {
		goto bail;
	}

	/*
	 * For software providers, copy the module name and module ID.
	 * For hardware providers, copy the driver name and instance.
	 */
	switch (info->pi_provider_type) {
	case CRYPTO_SW_PROVIDER:
		if (info->pi_provider_dev.pd_sw == NULL)
			goto bail;

		if ((mcp = mod_getctl(info->pi_provider_dev.pd_sw)) == NULL)
			goto bail;

		prov_desc->pd_module_id = mcp->mod_id;
		name = mcp->mod_modname;
		break;

	case CRYPTO_HW_PROVIDER:
	case CRYPTO_LOGICAL_PROVIDER:
		if (info->pi_provider_dev.pd_hw == NULL)
			goto bail;

		prov_desc->pd_instance =
		    ddi_get_instance(info->pi_provider_dev.pd_hw);
		name = (char *)ddi_driver_name(info->pi_provider_dev.pd_hw);
		break;
	}
	if (name == NULL)
		goto bail;

	prov_desc->pd_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
	(void) strcpy(prov_desc->pd_name, name);

	if ((prov_desc->pd_mctlp = kcf_get_modctl(info)) == NULL)
		goto bail;

	/* process the mechanisms supported by the provider */
	if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
		goto bail;

	/*
	 * Add provider to providers tables, also sets the descriptor
	 * pd_prov_id field.
	 */
	if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
		undo_register_provider(prov_desc, B_FALSE);
		goto bail;
	}

	if ((need_verify = kcf_need_signature_verification(prov_desc)) == -1) {
		undo_register_provider(prov_desc, B_TRUE);
		ret = CRYPTO_MODVERIFICATION_FAILED;
		goto bail;
	}

	if ((need_fips140_verify =
	    kcf_need_fips140_verification(prov_desc)) == -1) {
		mutex_enter(&prov_desc->pd_lock);
		prov_desc->pd_state = KCF_PROV_VERIFICATION_FAILED;
		mutex_exit(&prov_desc->pd_lock);
		ret = CRYPTO_FIPS140_ERROR;
		goto bail;
	}

	/*
	 * We create a taskq only for a hardware provider. The global
	 * software queue is used for software providers. We handle ordering
	 * of multi-part requests in the taskq routine. So, it is safe to
	 * have multiple threads for the taskq. We pass the TASKQ_PREPOPULATE
	 * flag to keep some entries cached to improve performance.
	 */
	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
		prov_desc->pd_taskq = taskq_create("kcf_taskq",
		    crypto_taskq_threads, minclsyspri,
		    crypto_taskq_minalloc, crypto_taskq_maxalloc,
		    TASKQ_PREPOPULATE);
	else
		prov_desc->pd_taskq = NULL;

	/* no kernel session to logical providers */
	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Open a session for session-oriented providers. This session
		 * is used for all kernel consumers. This is fine as a provider
		 * is required to support multiple thread access to a session.
		 * We can do this only after the taskq has been created as we
		 * do a kcf_submit_request() to open the session.
		 */
		if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
			kcf_req_params_t params;

			KCF_WRAP_SESSION_OPS_PARAMS(&params,
			    KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
			    CRYPTO_USER, NULL, 0, prov_desc);
			ret = kcf_submit_request(prov_desc, NULL, NULL,
			    &params, B_FALSE);

			if (ret != CRYPTO_SUCCESS) {
				undo_register_provider(prov_desc, B_TRUE);
				ret = CRYPTO_FAILED;
				goto bail;
			}
		}
	}

	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Create the kstat for this provider. There is a kstat
		 * installed for each successfully registered provider.
		 * This kstat is deleted when the provider unregisters.
		 */
		if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%s",
			    prov_desc->pd_name, "provider_stats");
		} else {
			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%d_%u_%s",
			    prov_desc->pd_name, prov_desc->pd_instance,
			    prov_desc->pd_prov_id, "provider_stats");
		}

		prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
		    KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
		    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

		if (prov_desc->pd_kstat != NULL) {
			bcopy(&kcf_stats_ks_data_template,
			    &prov_desc->pd_ks_data,
			    sizeof (kcf_stats_ks_data_template));
			prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
			KCF_PROV_REFHOLD(prov_desc);
			prov_desc->pd_kstat->ks_private = prov_desc;
			prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
			kstat_install(prov_desc->pd_kstat);
		}
	}

	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
		process_logical_providers(info, prov_desc);

	/* This provider needs to wait until we know the FIPS 140 status */
	if (need_fips140_verify == 1) {
		mutex_enter(&prov_desc->pd_lock);
		prov_desc->pd_state = KCF_PROV_UNVERIFIED_FIPS140;
		mutex_exit(&prov_desc->pd_lock);
		goto exit;
	}

	/* This provider needs to have the signature verified */
	if (need_verify == 1) {
		mutex_enter(&prov_desc->pd_lock);
		prov_desc->pd_state = KCF_PROV_UNVERIFIED;
		mutex_exit(&prov_desc->pd_lock);

		/* kcf_verify_signature routine will release this hold */
		KCF_PROV_REFHOLD(prov_desc);

		if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER) {
			/*
			 * It is not safe to make the door upcall to kcfd from
			 * this context since the kcfd thread could reenter
			 * devfs. So, we dispatch a taskq job to do the
			 * verification and return to the provider.
			 */
			(void) taskq_dispatch(system_taskq,
			    kcf_verify_signature, (void *)prov_desc, TQ_SLEEP);
		} else if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
			kcf_verify_signature(prov_desc);
			if (prov_desc->pd_state ==
			    KCF_PROV_VERIFICATION_FAILED) {
				undo_register_provider_extra(prov_desc);
				ret = CRYPTO_MODVERIFICATION_FAILED;
				goto bail;
			}
		}
	} else {
		mutex_enter(&prov_desc->pd_lock);
		prov_desc->pd_state = KCF_PROV_READY;
		mutex_exit(&prov_desc->pd_lock);
		kcf_do_notify(prov_desc, B_TRUE);
	}

exit:
	*handle = prov_desc->pd_kcf_prov_handle;
	ret = CRYPTO_SUCCESS;

bail:
	KCF_PROV_REFRELE(prov_desc);
	return (ret);
}
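
/*
 * Illustrative sketch: roughly what the registration half of a minimal
 * software provider's _init() looks like, per the block comment above
 * crypto_register_provider(). The names example_ops, example_mech_tab,
 * example_modlinkage and example_prov_handle are hypothetical; the pi_*
 * fields shown are the ones this file itself reads, except
 * pi_mech_list_count, which is assumed from the SPI header.
 *
 *	static crypto_kcf_provider_handle_t example_prov_handle;
 *
 *	int
 *	_init(void)
 *	{
 *		int ret;
 *		crypto_provider_info_t pinfo;
 *
 *		bzero(&pinfo, sizeof (pinfo));
 *		pinfo.pi_interface_version = CRYPTO_SPI_VERSION_1;
 *		pinfo.pi_provider_type = CRYPTO_SW_PROVIDER;
 *		pinfo.pi_provider_description = "example software provider";
 *		pinfo.pi_provider_dev.pd_sw = &example_modlinkage;
 *		pinfo.pi_ops_vector = &example_ops;
 *		pinfo.pi_mechanisms = example_mech_tab;
 *		pinfo.pi_mech_list_count =
 *		    sizeof (example_mech_tab) / sizeof (crypto_mech_info_t);
 *
 *		if ((ret = mod_install(&example_modlinkage)) != 0)
 *			return (ret);
 *
 *		if (crypto_register_provider(&pinfo, &example_prov_handle) !=
 *		    CRYPTO_SUCCESS) {
 *			(void) mod_remove(&example_modlinkage);
 *			return (EACCES);
 *		}
 *		return (0);
 *	}
 */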
/* Return the number of holds on a provider. */
int
kcf_get_refcnt(kcf_provider_desc_t *pd, boolean_t do_lock)
{
	int i;
	int refcnt = 0;

	if (do_lock)
		for (i = 0; i < pd->pd_nbins; i++)
			mutex_enter(&(pd->pd_percpu_bins[i].kp_lock));

	for (i = 0; i < pd->pd_nbins; i++)
		refcnt += pd->pd_percpu_bins[i].kp_holdcnt;

	if (do_lock)
		for (i = 0; i < pd->pd_nbins; i++)
			mutex_exit(&(pd->pd_percpu_bins[i].kp_lock));

	return (refcnt);
}

/*
 * This routine is used to notify the framework when a provider is being
 * removed. Hardware providers call this routine in their detach routines.
 * Software providers call this routine in their _fini() routine.
 */
int
crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
{
	uint_t mech_idx;
	kcf_provider_desc_t *desc;
	kcf_prov_state_t saved_state;

	/* lookup provider descriptor */
	if ((desc = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
		return (CRYPTO_UNKNOWN_PROVIDER);

	mutex_enter(&desc->pd_lock);
	/*
	 * Check if any other thread is disabling or removing
	 * this provider. We return if this is the case.
	 */
	if (desc->pd_state >= KCF_PROV_DISABLED) {
		mutex_exit(&desc->pd_lock);
		/* Release reference held by kcf_prov_tab_lookup(). */
		KCF_PROV_REFRELE(desc);
		return (CRYPTO_BUSY);
	}

	saved_state = desc->pd_state;
	desc->pd_state = KCF_PROV_UNREGISTERING;

	if (saved_state == KCF_PROV_BUSY) {
		/*
		 * The per-provider taskq threads may be waiting. We
		 * signal them so that they can start failing requests.
		 */
		cv_broadcast(&desc->pd_resume_cv);
	}

	mutex_exit(&desc->pd_lock);

	if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
		remove_provider(desc);
	}

	if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/* remove the provider from the mechanisms tables */
		for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
		    mech_idx++) {
			kcf_remove_mech_provider(
			    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
		}
	}

	/* remove provider from providers table */
	if (kcf_prov_tab_rem_provider((crypto_provider_id_t)handle) !=
	    CRYPTO_SUCCESS) {
		/* Release reference held by kcf_prov_tab_lookup(). */
		KCF_PROV_REFRELE(desc);
		return (CRYPTO_UNKNOWN_PROVIDER);
	}

	delete_kstat(desc);

	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
		/*
		 * Wait till the existing requests with the provider complete
		 * and all the holds are released. All the holds on a software
		 * provider are from kernel clients and the hold time
		 * is expected to be short. So, we won't be stuck here forever.
		 */
		while (kcf_get_refcnt(desc, B_TRUE) > 1) {
			/* wait 1 second and try again. */
			delay(1 * drv_usectohz(1000000));
		}
	} else {
		int i;
		kcf_prov_cpu_t *mp;

		/*
		 * Wait until requests that have been sent to the provider
		 * complete.
		 */
		for (i = 0; i < desc->pd_nbins; i++) {
			mp = &(desc->pd_percpu_bins[i]);

			mutex_enter(&mp->kp_lock);
			while (mp->kp_jobcnt > 0) {
				cv_wait(&mp->kp_cv, &mp->kp_lock);
			}
			mutex_exit(&mp->kp_lock);
		}
	}

	mutex_enter(&desc->pd_lock);
	desc->pd_state = KCF_PROV_UNREGISTERED;
	mutex_exit(&desc->pd_lock);

	kcf_do_notify(desc, B_FALSE);

	mutex_enter(&prov_tab_mutex);
	/* Release reference held by kcf_prov_tab_lookup(). */
	KCF_PROV_REFRELE(desc);

	if (kcf_get_refcnt(desc, B_TRUE) == 0) {
		/* kcf_free_provider_desc drops prov_tab_mutex */
		kcf_free_provider_desc(desc);
	} else {
		ASSERT(desc->pd_prov_type != CRYPTO_SW_PROVIDER);
		/*
		 * We could avoid this if /dev/crypto can proactively
		 * remove any holds on us from a dormant PKCS #11 app.
		 * For now, we check the provider table for
		 * KCF_PROV_UNREGISTERED entries when a provider is
		 * added to the table or when a provider is removed from it
		 * and free them when refcnt reaches zero.
		 */
		kcf_need_provtab_walk = B_TRUE;
		mutex_exit(&prov_tab_mutex);
	}

	return (CRYPTO_SUCCESS);
}
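
/*
 * Illustrative sketch: the matching teardown in a software provider's
 * _fini(). The module must not be unloaded while the framework still
 * depends on it, so a failure from crypto_unregister_provider() (for
 * example CRYPTO_BUSY) is turned into a failed mod_remove().
 * example_prov_handle and example_modlinkage are hypothetical names.
 *
 *	int
 *	_fini(void)
 *	{
 *		if (crypto_unregister_provider(example_prov_handle) !=
 *		    CRYPTO_SUCCESS)
 *			return (EBUSY);
 *
 *		return (mod_remove(&example_modlinkage));
 *	}
 */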

/*
 * This routine is used to notify the framework that the state of
 * a cryptographic provider has changed. Valid state codes are:
 *
 * CRYPTO_PROVIDER_READY
 *	The provider indicates that it can process more requests. A provider
 *	will notify with this event if it previously has notified us with a
 *	CRYPTO_PROVIDER_BUSY.
 *
 * CRYPTO_PROVIDER_BUSY
 *	The provider cannot take more requests.
 *
 * CRYPTO_PROVIDER_FAILED
 *	The provider encountered an internal error. The framework will not
 *	be sending any more requests to the provider. The provider may notify
 *	with a CRYPTO_PROVIDER_READY if it is able to recover from the error.
 *
 * This routine can be called from user or interrupt context.
 */
void
crypto_provider_notification(crypto_kcf_provider_handle_t handle, uint_t state)
{
	kcf_provider_desc_t *pd;

	/* lookup the provider from the given handle */
	if ((pd = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
		return;

	mutex_enter(&pd->pd_lock);

	if (pd->pd_state <= KCF_PROV_VERIFICATION_FAILED)
		goto out;

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		cmn_err(CE_WARN, "crypto_provider_notification: "
		    "logical provider (%x) ignored\n", handle);
		goto out;
	}
	switch (state) {
	case CRYPTO_PROVIDER_READY:
		switch (pd->pd_state) {
		case KCF_PROV_BUSY:
			pd->pd_state = KCF_PROV_READY;
			/*
			 * Signal the per-provider taskq threads that they
			 * can start submitting requests.
			 */
			cv_broadcast(&pd->pd_resume_cv);
			break;

		case KCF_PROV_FAILED:
			/*
			 * The provider recovered from the error. Let us
			 * use it now.
			 */
			pd->pd_state = KCF_PROV_READY;
			break;
		}
		break;

	case CRYPTO_PROVIDER_BUSY:
		switch (pd->pd_state) {
		case KCF_PROV_READY:
			pd->pd_state = KCF_PROV_BUSY;
			break;
		}
		break;

	case CRYPTO_PROVIDER_FAILED:
		/*
		 * We note the failure and return. The per-provider taskq
		 * threads check this flag and start failing the
		 * requests, if it is set. See process_req_hwp() for details.
		 */
		switch (pd->pd_state) {
		case KCF_PROV_READY:
			pd->pd_state = KCF_PROV_FAILED;
			break;

		case KCF_PROV_BUSY:
			pd->pd_state = KCF_PROV_FAILED;
			/*
			 * The per-provider taskq threads may be waiting. We
			 * signal them so that they can start failing requests.
			 */
			cv_broadcast(&pd->pd_resume_cv);
			break;
		}
		break;
	}
out:
	mutex_exit(&pd->pd_lock);
	KCF_PROV_REFRELE(pd);
}

/*
 * This routine is used to notify the framework of the result of
 * an asynchronous request handled by a provider. Valid error
 * codes are the same as the CRYPTO_* errors defined in common.h.
 *
 * This routine can be called from user or interrupt context.
 */
void
crypto_op_notification(crypto_req_handle_t handle, int error)
{
	kcf_call_type_t ctype;

	if (handle == NULL)
		return;

	if ((ctype = GET_REQ_TYPE(handle)) == CRYPTO_SYNCH) {
		kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)handle;

		KCF_PROV_JOB_RELE_STAT(sreq->sn_mp, (error != CRYPTO_SUCCESS));
		kcf_sop_done(sreq, error);
	} else {
		kcf_areq_node_t *areq = (kcf_areq_node_t *)handle;

		ASSERT(ctype == CRYPTO_ASYNCH);
		KCF_PROV_JOB_RELE_STAT(areq->an_mp, (error != CRYPTO_SUCCESS));
		kcf_aop_done(areq, error);
	}
}

/*
 * This routine is used by software providers to determine
 * whether to use KM_SLEEP or KM_NOSLEEP during memory allocation.
 * Note that hardware providers can always use KM_SLEEP. So,
 * they do not need to call this routine.
 *
 * This routine can be called from user or interrupt context.
 */
int
crypto_kmflag(crypto_req_handle_t handle)
{
	return (REQHNDL2_KMFLAG(handle));
}
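
/*
 * Illustrative sketch: how a software provider entry point typically uses
 * crypto_kmflag() so that allocations do not sleep when the request was
 * submitted from a context that cannot block. example_digest_init() and
 * example_ctx_t are hypothetical.
 *
 *	static int
 *	example_digest_init(crypto_ctx_t *ctx, crypto_mechanism_t *mech,
 *	    crypto_req_handle_t req)
 *	{
 *		example_ctx_t *ec;
 *
 *		ec = kmem_zalloc(sizeof (example_ctx_t), crypto_kmflag(req));
 *		if (ec == NULL)
 *			return (CRYPTO_HOST_MEMORY);
 *
 *		ctx->cc_provider_private = ec;
 *		return (CRYPTO_SUCCESS);
 *	}
 */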

/*
 * Process the mechanism info structures specified by the provider
 * during registration. A NULL crypto_provider_info_t indicates
 * an already initialized provider descriptor.
 *
 * Mechanisms are not added to the kernel's mechanism table if the
 * provider is a logical provider.
 *
 * Returns CRYPTO_SUCCESS on success, CRYPTO_ARGUMENTS_BAD if one
 * of the specified mechanisms was malformed, or CRYPTO_HOST_MEMORY
 * if the table of mechanisms is full.
 */
static int
init_prov_mechs(crypto_provider_info_t *info, kcf_provider_desc_t *desc)
{
	uint_t mech_idx;
	uint_t cleanup_idx;
	int err = CRYPTO_SUCCESS;
	kcf_prov_mech_desc_t *pmd;
	int desc_use_count = 0;
	int mcount = desc->pd_mech_list_count;

	if (desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		if (info != NULL) {
			ASSERT(info->pi_mechanisms != NULL);
			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
			    sizeof (crypto_mech_info_t) * mcount);
		}
		return (CRYPTO_SUCCESS);
	}

	/*
	 * Copy the mechanism list from the provider info to the provider
	 * descriptor. desc->pd_mechanisms has an extra crypto_mech_info_t
	 * element if the provider has random_ops since we keep an internal
	 * mechanism, SUN_RANDOM, in this case.
	 */
	if (info != NULL) {
		if (info->pi_ops_vector->co_random_ops != NULL) {
			crypto_mech_info_t *rand_mi;

			/*
			 * Need the following check as it is possible to have
			 * a provider that implements just random_ops and has
			 * pi_mechanisms == NULL.
			 */
			if (info->pi_mechanisms != NULL) {
				bcopy(info->pi_mechanisms, desc->pd_mechanisms,
				    sizeof (crypto_mech_info_t) * (mcount - 1));
			}
			rand_mi = &desc->pd_mechanisms[mcount - 1];

			bzero(rand_mi, sizeof (crypto_mech_info_t));
			(void) strncpy(rand_mi->cm_mech_name, SUN_RANDOM,
			    CRYPTO_MAX_MECH_NAME);
			rand_mi->cm_func_group_mask = CRYPTO_FG_RANDOM;
		} else {
			ASSERT(info->pi_mechanisms != NULL);
			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
			    sizeof (crypto_mech_info_t) * mcount);
		}
	}

	/*
	 * For each mechanism supported by the provider, add the provider
	 * to the corresponding KCF mechanism mech_entry chain.
	 */
	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count; mech_idx++) {
		crypto_mech_info_t *mi = &desc->pd_mechanisms[mech_idx];

		if ((mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BITS) &&
		    (mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)) {
			err = CRYPTO_ARGUMENTS_BAD;
			break;
		}

		if (desc->pd_flags & CRYPTO_HASH_NO_UPDATE &&
		    mi->cm_func_group_mask & CRYPTO_FG_DIGEST) {
			/*
			 * We ask the provider to specify the limit
			 * per hash mechanism. But, in practice, a
			 * hardware limitation means all hash mechanisms
			 * will have the same maximum size allowed for
			 * input data. So, we make it a per provider
			 * limit to keep it simple.
			 */
			if (mi->cm_max_input_length == 0) {
				err = CRYPTO_ARGUMENTS_BAD;
				break;
			} else {
				desc->pd_hash_limit = mi->cm_max_input_length;
			}
		}

		if ((err = kcf_add_mech_provider(mech_idx, desc, &pmd)) !=
		    KCF_SUCCESS)
			break;

		if (pmd == NULL)
			continue;

		/* The provider will be used for this mechanism */
		desc_use_count++;
	}

	/*
	 * Don't allow multiple software providers with disabled mechanisms
	 * to register. Subsequent enabling of mechanisms will result in
	 * an unsupported configuration, i.e. multiple software providers
	 * per mechanism.
	 */
	if (desc_use_count == 0 && desc->pd_prov_type == CRYPTO_SW_PROVIDER)
		return (CRYPTO_ARGUMENTS_BAD);

	if (err == KCF_SUCCESS)
		return (CRYPTO_SUCCESS);

	/*
	 * An error occurred while adding the mechanism; clean up
	 * and bail.
	 */
	for (cleanup_idx = 0; cleanup_idx < mech_idx; cleanup_idx++) {
		kcf_remove_mech_provider(
		    desc->pd_mechanisms[cleanup_idx].cm_mech_name, desc);
	}

	if (err == KCF_MECH_TAB_FULL)
		return (CRYPTO_HOST_MEMORY);

	return (CRYPTO_ARGUMENTS_BAD);
}
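
/*
 * Illustrative sketch: the kind of mechanism table entry that
 * init_prov_mechs() consumes via pi_mechanisms. The mechanism name, number
 * and key sizes are made up; the field names in the annotations match the
 * ones referenced above, except the min/max key length positions, which
 * are assumed from the SPI header.
 *
 *	static crypto_mech_info_t example_mech_tab[] = {
 *		{ "CKM_EXAMPLE_CBC",			(cm_mech_name)
 *		    EXAMPLE_CBC_MECH_INFO_TYPE,		(cm_mech_number)
 *		    CRYPTO_FG_ENCRYPT | CRYPTO_FG_DECRYPT, (cm_func_group_mask)
 *		    16, 32,				(key sizes)
 *		    CRYPTO_KEYSIZE_UNIT_IN_BYTES }	(cm_mech_flags)
 *	};
 *
 * CRYPTO_KEYSIZE_UNIT_IN_BYTES and CRYPTO_KEYSIZE_UNIT_IN_BITS are mutually
 * exclusive in cm_mech_flags (init_prov_mechs() rejects an entry with both),
 * and a provider registering with CRYPTO_HASH_NO_UPDATE must supply a
 * non-zero cm_max_input_length for its digest mechanisms.
 */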

/*
 * Update routine for kstat. Only privileged users are allowed to
 * access this information, since this information is sensitive.
 * There are some cryptographic attacks (e.g. traffic analysis)
 * which can use this information.
 */
static int
kcf_prov_kstat_update(kstat_t *ksp, int rw)
{
	kcf_prov_stats_t *ks_data;
	kcf_provider_desc_t *pd = (kcf_provider_desc_t *)ksp->ks_private;
	int i;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ks_data = ksp->ks_data;

	if (secpolicy_sys_config(CRED(), B_TRUE) != 0) {
		ks_data->ps_ops_total.value.ui64 = 0;
		ks_data->ps_ops_passed.value.ui64 = 0;
		ks_data->ps_ops_failed.value.ui64 = 0;
		ks_data->ps_ops_busy_rval.value.ui64 = 0;
	} else {
		uint64_t dtotal, ftotal, btotal;

		dtotal = ftotal = btotal = 0;
		/* No locking done since an exact count is not required. */
		for (i = 0; i < pd->pd_nbins; i++) {
			dtotal += pd->pd_percpu_bins[i].kp_ndispatches;
			ftotal += pd->pd_percpu_bins[i].kp_nfails;
			btotal += pd->pd_percpu_bins[i].kp_nbusy_rval;
		}

		ks_data->ps_ops_total.value.ui64 = dtotal;
		ks_data->ps_ops_failed.value.ui64 = ftotal;
		ks_data->ps_ops_busy_rval.value.ui64 = btotal;
		ks_data->ps_ops_passed.value.ui64 = dtotal - ftotal - btotal;
	}

	return (0);
}

/*
 * Utility routine called from failure paths in crypto_register_provider()
 * and from crypto_load_soft_disabled().
 */
void
undo_register_provider(kcf_provider_desc_t *desc, boolean_t remove_prov)
{
	uint_t mech_idx;

	/* remove the provider from the mechanisms tables */
	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
	    mech_idx++) {
		kcf_remove_mech_provider(
		    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
	}

	/* remove provider from providers table */
	if (remove_prov)
		(void) kcf_prov_tab_rem_provider(desc->pd_prov_id);
}

static void
undo_register_provider_extra(kcf_provider_desc_t *desc)
{
	delete_kstat(desc);
	undo_register_provider(desc, B_TRUE);
}

/*
 * Utility routine called from crypto_load_soft_disabled(). Callers
 * should have done a prior undo_register_provider().
 */
void
redo_register_provider(kcf_provider_desc_t *pd)
{
	/* process the mechanisms supported by the provider */
	(void) init_prov_mechs(NULL, pd);

	/*
	 * Hold provider in providers table. We should not call
	 * kcf_prov_tab_add_provider() here as the provider descriptor
	 * is still valid which means it has an entry in the provider
	 * table.
	 */
	KCF_PROV_REFHOLD(pd);
}

/*
 * Add provider (p1) to another provider's array of providers (p2).
 * Hardware and logical providers use this array to cross-reference
 * each other.
 */
static void
add_provider_to_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
{
	kcf_provider_list_t *new;

	new = kmem_alloc(sizeof (kcf_provider_list_t), KM_SLEEP);
	mutex_enter(&p2->pd_lock);
	new->pl_next = p2->pd_provider_list;
	p2->pd_provider_list = new;
	new->pl_provider = p1;
	mutex_exit(&p2->pd_lock);
}

/*
 * Remove provider (p1) from another provider's array of providers (p2).
 * Hardware and logical providers use this array to cross-reference
 * each other.
 */
static void
remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
{
	kcf_provider_list_t *pl = NULL, **prev;

	mutex_enter(&p2->pd_lock);
	for (pl = p2->pd_provider_list, prev = &p2->pd_provider_list;
	    pl != NULL; prev = &pl->pl_next, pl = pl->pl_next) {
		if (pl->pl_provider == p1) {
			break;
		}
	}

	/* p1 was not found in p2's list; nothing to detach */
	if (pl == NULL) {
		mutex_exit(&p2->pd_lock);
		return;
	}

	/* detach and free kcf_provider_list structure */
	*prev = pl->pl_next;
	kmem_free(pl, sizeof (*pl));
	mutex_exit(&p2->pd_lock);
}

/*
 * Convert an array of logical provider handles (crypto_provider_id)
 * stored in a crypto_provider_info structure into an array of provider
 * descriptors (kcf_provider_desc_t) attached to a logical provider.
 */
static void
process_logical_providers(crypto_provider_info_t *info, kcf_provider_desc_t *hp)
{
	kcf_provider_desc_t *lp;
	crypto_provider_id_t handle;
	int count = info->pi_logical_provider_count;
	int i;

	/* add hardware provider to each logical provider */
	for (i = 0; i < count; i++) {
		handle = info->pi_logical_providers[i];
		lp = kcf_prov_tab_lookup((crypto_provider_id_t)handle);
		if (lp == NULL) {
			continue;
		}
		add_provider_to_array(hp, lp);
		hp->pd_flags |= KCF_LPROV_MEMBER;

		/*
		 * A hardware provider has to have the provider descriptor of
		 * every logical provider it belongs to, so it can be removed
		 * from the logical provider if the hardware provider
		 * unregisters from the framework.
		 */
		add_provider_to_array(lp, hp);
		KCF_PROV_REFRELE(lp);
	}
}
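
/*
 * Illustrative sketch: a hardware provider that is a member of a logical
 * provider lists the logical provider's handle in its registration info so
 * that process_logical_providers() can cross-link the two descriptors.
 * example_hw_info, example_hw_handle and example_logical_handle are
 * hypothetical names.
 *
 *	example_hw_info.pi_logical_provider_count = 1;
 *	example_hw_info.pi_logical_providers = &example_logical_handle;
 *	ret = crypto_register_provider(&example_hw_info, &example_hw_handle);
 */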

/*
 * This routine removes a provider from all of the logical or
 * hardware providers it belongs to, and frees the provider's
 * array of pointers to providers.
 */
static void
remove_provider(kcf_provider_desc_t *pp)
{
	kcf_provider_desc_t *p;
	kcf_provider_list_t *e, *next;

	mutex_enter(&pp->pd_lock);
	for (e = pp->pd_provider_list; e != NULL; e = next) {
		p = e->pl_provider;
		remove_provider_from_array(pp, p);
		if (p->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    p->pd_provider_list == NULL)
			p->pd_flags &= ~KCF_LPROV_MEMBER;
		next = e->pl_next;
		kmem_free(e, sizeof (*e));
	}
	pp->pd_provider_list = NULL;
	mutex_exit(&pp->pd_lock);
}

/*
 * Dispatch events as needed for a provider. is_added flag tells
 * whether the provider is registering or unregistering.
 */
void
kcf_do_notify(kcf_provider_desc_t *prov_desc, boolean_t is_added)
{
	int i;
	crypto_notify_event_change_t ec;

	ASSERT(prov_desc->pd_state > KCF_PROV_VERIFICATION_FAILED);

	/*
	 * Inform interested clients of the mechanisms becoming
	 * available/unavailable. We skip this for logical providers
	 * as they do not affect mechanisms.
	 */
	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		ec.ec_provider_type = prov_desc->pd_prov_type;
		ec.ec_change = is_added ? CRYPTO_MECH_ADDED :
		    CRYPTO_MECH_REMOVED;
		for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
			/* Skip any mechanisms not allowed by the policy */
			if (is_mech_disabled(prov_desc,
			    prov_desc->pd_mechanisms[i].cm_mech_name))
				continue;

			(void) strncpy(ec.ec_mech_name,
			    prov_desc->pd_mechanisms[i].cm_mech_name,
			    CRYPTO_MAX_MECH_NAME);
			kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED, &ec);
		}
	}

	/*
	 * Inform interested clients about the new or departing provider.
	 * In case of a logical provider, we need to notify the event only
	 * for the logical provider and not for the underlying
	 * providers which are known by the KCF_LPROV_MEMBER bit.
	 */
	if (prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER ||
	    (prov_desc->pd_flags & KCF_LPROV_MEMBER) == 0) {
		kcf_walk_ntfylist(is_added ? CRYPTO_EVENT_PROVIDER_REGISTERED :
		    CRYPTO_EVENT_PROVIDER_UNREGISTERED, prov_desc);
	}
}

static void
delete_kstat(kcf_provider_desc_t *desc)
{
	/* destroy the kstat created for this provider */
	if (desc->pd_kstat != NULL) {
		kcf_provider_desc_t *kspd = desc->pd_kstat->ks_private;

		/* release reference held by desc->pd_kstat->ks_private */
		ASSERT(desc == kspd);
		kstat_delete(kspd->pd_kstat);
		desc->pd_kstat = NULL;
		KCF_PROV_REFRELE(kspd);
	}
}
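
/*
 * Observability note: the per-provider kstats created in
 * crypto_register_provider() live under the "kcf" module with class
 * "crypto", so they can be listed from userland with, for example,
 * kstat -m kcf. Each <provider>_provider_stats instance carries the
 * kcf_ops_total, kcf_ops_passed, kcf_ops_failed and kcf_ops_returned_busy
 * counters from kcf_stats_ks_data_template; non-zero values are reported
 * only to sufficiently privileged callers, as enforced by
 * kcf_prov_kstat_update().
 */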