/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This file is part of the core Kernel Cryptographic Framework.
 * It implements the SPI functions exported to cryptographic
 * providers.
 */

#include <sys/ksynch.h>
#include <sys/cmn_err.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/spi.h>
#include <sys/taskq.h>
#include <sys/disp.h>
#include <sys/kstat.h>
#include <sys/policy.h>
#include <sys/cpuvar.h>

/*
 * Tunables passed to taskq_create(): number of threads, minalloc
 * and maxalloc.
 */
int crypto_taskq_threads = CRYPTO_TASKQ_THREADS;
int crypto_taskq_minalloc = CYRPTO_TASKQ_MIN;
int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX;

static void free_provider_list(kcf_provider_list_t *);
static void remove_provider(kcf_provider_desc_t *);
static void process_logical_providers(crypto_provider_info_t *,
    kcf_provider_desc_t *);
static int init_prov_mechs(crypto_provider_info_t *, kcf_provider_desc_t *);
static int kcf_prov_kstat_update(kstat_t *, int);
static void undo_register_provider_extra(kcf_provider_desc_t *);
static void delete_kstat(kcf_provider_desc_t *);

static kcf_prov_stats_t kcf_stats_ks_data_template = {
	{ "kcf_ops_total",		KSTAT_DATA_UINT64 },
	{ "kcf_ops_passed",		KSTAT_DATA_UINT64 },
	{ "kcf_ops_failed",		KSTAT_DATA_UINT64 },
	{ "kcf_ops_returned_busy",	KSTAT_DATA_UINT64 }
};

#define	KCF_SPI_COPY_OPS(src, dst, ops) if ((src)->ops != NULL) \
	*((dst)->ops) = *((src)->ops);

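/*
 * Note: KCF_SPI_COPY_OPS copies a single ops group only when the provider
 * actually supplied it (the pointer in the source vector is non-NULL). The
 * copy is done through the destination group pointer, which is therefore
 * expected to already point at storage (presumably set up by
 * kcf_alloc_provider_desc()); groups the provider leaves NULL are skipped.
 */
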
/*
 * Copy an ops vector from src to dst. Used during provider registration
 * to copy the ops vector from the provider info structure to the
 * provider descriptor maintained by KCF.
 * Copying the ops vector specified by the provider is needed since the
 * framework does not require the provider info structure to be
 * persistent.
 */
static void
copy_ops_vector_v1(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_control_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_cipher_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mac_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_sign_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_verify_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_cipher_mac_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_random_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_session_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_object_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_key_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_provider_ops);
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_ctx_ops);
}

static void
copy_ops_vector_v2(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops);
}

static void
copy_ops_vector_v3(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
{
	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_nostore_key_ops);
}

/*
 * This routine is used to add cryptographic providers to the KCF framework.
 * Providers pass a crypto_provider_info structure to crypto_register_provider()
 * and get back a handle. The crypto_provider_info structure contains a
 * list of mechanisms supported by the provider and an ops vector containing
 * provider entry points. Hardware providers call this routine in their attach
 * routines. Software providers call this routine in their _init() routine.
 */
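/*
 * A minimal sketch (hypothetical provider code, not part of this file) of
 * the registration a software provider performs from its _init() routine.
 * The example_* names, the field layout shown for the initializer, and the
 * error handling are assumptions; a real provider fills in a
 * crypto_provider_info_t matching the fields consumed below.
 *
 *	static crypto_provider_info_t example_prov_info = {
 *		CRYPTO_SPI_VERSION_1,
 *		"Example Software Provider",
 *		CRYPTO_SW_PROVIDER,
 *		...	(provider handle, ops vector, mechanism list, etc.)
 *	};
 *
 *	int
 *	_init(void)
 *	{
 *		int ret;
 *
 *		if ((ret = mod_install(&example_modlinkage)) != 0)
 *			return (ret);
 *
 *		if (crypto_register_provider(&example_prov_info,
 *		    &example_prov_handle) != CRYPTO_SUCCESS)
 *			cmn_err(CE_WARN, "example: registration failed");
 *
 *		return (0);
 *	}
 */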
int
crypto_register_provider(crypto_provider_info_t *info,
    crypto_kcf_provider_handle_t *handle)
{
	int need_verify;
	struct modctl *mcp;
	char *name;
	char ks_name[KSTAT_STRLEN];

	kcf_provider_desc_t *prov_desc = NULL;
	int ret = CRYPTO_ARGUMENTS_BAD;

	if (info->pi_interface_version > CRYPTO_SPI_VERSION_3)
		return (CRYPTO_VERSION_MISMATCH);

	/*
	 * Check provider type, must be software, hardware, or logical.
	 */
	if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
	    info->pi_provider_type != CRYPTO_SW_PROVIDER &&
	    info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
		return (CRYPTO_ARGUMENTS_BAD);

	/*
	 * Allocate and initialize a new provider descriptor. We also
	 * hold it and release it when done.
	 */
	prov_desc = kcf_alloc_provider_desc(info);
	KCF_PROV_REFHOLD(prov_desc);

	prov_desc->pd_prov_type = info->pi_provider_type;

	/* provider-private handle, opaque to KCF */
	prov_desc->pd_prov_handle = info->pi_provider_handle;

	/* copy provider description string */
	if (info->pi_provider_description != NULL) {
		/*
		 * pi_provider_description is a string that can contain
		 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
		 * INCLUDING the terminating null character. A bcopy()
		 * is necessary here as pd_description should not have
		 * a null character. See comments in kcf_alloc_provider_desc()
		 * for details on the pd_description field.
		 */
		bcopy(info->pi_provider_description, prov_desc->pd_description,
		    min(strlen(info->pi_provider_description),
		    CRYPTO_PROVIDER_DESCR_MAX_LEN));
	}

	if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
		if (info->pi_ops_vector == NULL) {
			goto bail;
		}
		copy_ops_vector_v1(info->pi_ops_vector,
		    prov_desc->pd_ops_vector);
		if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
			copy_ops_vector_v2(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
			prov_desc->pd_flags = info->pi_flags;
		}
		if (info->pi_interface_version == CRYPTO_SPI_VERSION_3) {
			copy_ops_vector_v3(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
		}
	}

	/* object_ops and nostore_key_ops are mutually exclusive */
	if (prov_desc->pd_ops_vector->co_object_ops &&
	    prov_desc->pd_ops_vector->co_nostore_key_ops) {
		goto bail;
	}
	/*
	 * For software providers, copy the module name and module ID.
	 * For hardware providers, copy the driver name and instance.
	 */
	switch (info->pi_provider_type) {
	case CRYPTO_SW_PROVIDER:
		if (info->pi_provider_dev.pd_sw == NULL)
			goto bail;

		if ((mcp = mod_getctl(info->pi_provider_dev.pd_sw)) == NULL)
			goto bail;

		prov_desc->pd_module_id = mcp->mod_id;
		name = mcp->mod_modname;
		break;

	case CRYPTO_HW_PROVIDER:
	case CRYPTO_LOGICAL_PROVIDER:
		if (info->pi_provider_dev.pd_hw == NULL)
			goto bail;

		prov_desc->pd_instance =
		    ddi_get_instance(info->pi_provider_dev.pd_hw);
		name = (char *)ddi_driver_name(info->pi_provider_dev.pd_hw);
		break;
	}
	if (name == NULL)
		goto bail;

	prov_desc->pd_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
	(void) strcpy(prov_desc->pd_name, name);

	if ((prov_desc->pd_mctlp = kcf_get_modctl(info)) == NULL)
		goto bail;

	/* process the mechanisms supported by the provider */
	if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
		goto bail;

	/*
	 * Add provider to providers tables, also sets the descriptor
	 * pd_prov_id field.
	 */
	if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
		undo_register_provider(prov_desc, B_FALSE);
		goto bail;
	}

	if ((need_verify = kcf_need_signature_verification(prov_desc)) == -1) {
		undo_register_provider(prov_desc, B_TRUE);
		ret = CRYPTO_MODVERIFICATION_FAILED;
		goto bail;
	}

	/*
	 * We create a taskq only for a hardware provider. The global
	 * software queue is used for software providers. We handle ordering
	 * of multi-part requests in the taskq routine. So, it is safe to
	 * have multiple threads for the taskq. We pass the TASKQ_PREPOPULATE
	 * flag to keep some entries cached to improve performance.
	 */
	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
		prov_desc->pd_sched_info.ks_taskq = taskq_create("kcf_taskq",
		    crypto_taskq_threads, minclsyspri,
		    crypto_taskq_minalloc, crypto_taskq_maxalloc,
		    TASKQ_PREPOPULATE);
	else
		prov_desc->pd_sched_info.ks_taskq = NULL;

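	/*
	 * Note: crypto_taskq_threads, crypto_taskq_minalloc and
	 * crypto_taskq_maxalloc above are the global tunables defined at
	 * the top of this file. On Solaris they can normally be overridden
	 * at boot via /etc/system, e.g.
	 *	set kcf:crypto_taskq_threads = 8
	 * (the "kcf" module prefix is an assumption about how this code is
	 * delivered; adjust to the actual module name).
	 */
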
	/* no kernel session to logical providers */
	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Open a session for session-oriented providers. This session
		 * is used for all kernel consumers. This is fine as a provider
		 * is required to support multiple thread access to a session.
		 * We can do this only after the taskq has been created as we
		 * do a kcf_submit_request() to open the session.
		 */
		if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
			kcf_req_params_t params;

			KCF_WRAP_SESSION_OPS_PARAMS(&params,
			    KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
			    CRYPTO_USER, NULL, 0, prov_desc);
			ret = kcf_submit_request(prov_desc, NULL, NULL, &params,
			    B_FALSE);

			if (ret != CRYPTO_SUCCESS) {
				undo_register_provider(prov_desc, B_TRUE);
				ret = CRYPTO_FAILED;
				goto bail;
			}
		}
	}

	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Create the kstat for this provider. There is a kstat
		 * installed for each successfully registered provider.
		 * This kstat is deleted when the provider unregisters.
		 */
		if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%s",
			    prov_desc->pd_name, "provider_stats");
		} else {
			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%d_%u_%s",
			    prov_desc->pd_name, prov_desc->pd_instance,
			    prov_desc->pd_prov_id, "provider_stats");
		}

		prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
		    KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
		    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

		if (prov_desc->pd_kstat != NULL) {
			bcopy(&kcf_stats_ks_data_template,
			    &prov_desc->pd_ks_data,
			    sizeof (kcf_stats_ks_data_template));
			prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
			KCF_PROV_REFHOLD(prov_desc);
			KCF_PROV_IREFHOLD(prov_desc);
			prov_desc->pd_kstat->ks_private = prov_desc;
			prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
			kstat_install(prov_desc->pd_kstat);
		}
	}

	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
		process_logical_providers(info, prov_desc);

	if (need_verify == 1) {
		/* kcf_verify_signature routine will release these holds */
		KCF_PROV_REFHOLD(prov_desc);
		KCF_PROV_IREFHOLD(prov_desc);

		if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER) {
			/*
			 * It is not safe to make the door upcall to kcfd from
			 * this context since the kcfd thread could reenter
			 * devfs. So, we dispatch a taskq job to do the
			 * verification and return to the provider.
			 */
			(void) taskq_dispatch(system_taskq,
			    kcf_verify_signature, (void *)prov_desc, TQ_SLEEP);
		} else if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
			kcf_verify_signature(prov_desc);
			if (prov_desc->pd_state ==
			    KCF_PROV_VERIFICATION_FAILED) {
				undo_register_provider_extra(prov_desc);
				ret = CRYPTO_MODVERIFICATION_FAILED;
				goto bail;
			}
		}
	} else {
		mutex_enter(&prov_desc->pd_lock);
		prov_desc->pd_state = KCF_PROV_READY;
		mutex_exit(&prov_desc->pd_lock);
		kcf_do_notify(prov_desc, B_TRUE);
	}

	*handle = prov_desc->pd_kcf_prov_handle;
	ret = CRYPTO_SUCCESS;

bail:
	KCF_PROV_REFRELE(prov_desc);
	return (ret);
}

/*
 * This routine is used to notify the framework when a provider is being
 * removed. Hardware providers call this routine in their detach routines.
 * Software providers call this routine in their _fini() routine.
 */
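/*
 * A minimal sketch (hypothetical provider code, not part of this file) of
 * how a software provider's _fini() routine typically pairs with
 * crypto_unregister_provider(): example_prov_handle is whatever
 * crypto_register_provider() returned in _init(), example_modlinkage is the
 * provider's module linkage, and mapping CRYPTO_BUSY to EBUSY keeps the
 * module loaded while requests or holds are still outstanding.
 *
 *	int
 *	_fini(void)
 *	{
 *		if (crypto_unregister_provider(example_prov_handle) !=
 *		    CRYPTO_SUCCESS)
 *			return (EBUSY);
 *
 *		return (mod_remove(&example_modlinkage));
 *	}
 */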
int
crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
{
	uint_t mech_idx;
	kcf_provider_desc_t *desc;
	kcf_prov_state_t saved_state;

	/* lookup provider descriptor */
	if ((desc = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
		return (CRYPTO_UNKNOWN_PROVIDER);

	mutex_enter(&desc->pd_lock);
	/*
	 * Check if any other thread is disabling or removing
	 * this provider. We return if this is the case.
	 */
	if (desc->pd_state >= KCF_PROV_DISABLED) {
		mutex_exit(&desc->pd_lock);
		/* Release reference held by kcf_prov_tab_lookup(). */
		KCF_PROV_REFRELE(desc);
		return (CRYPTO_BUSY);
	}

	saved_state = desc->pd_state;
	desc->pd_state = KCF_PROV_REMOVED;

	if (saved_state == KCF_PROV_BUSY) {
		/*
		 * The per-provider taskq threads may be waiting. We
		 * signal them so that they can start failing requests.
		 */
		cv_broadcast(&desc->pd_resume_cv);
	}

	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
		/*
		 * Check if this provider is currently being used.
		 * pd_irefcnt is the number of holds from the internal
		 * structures. We add one to account for the above lookup.
		 */
		if (desc->pd_refcnt > desc->pd_irefcnt + 1) {
			desc->pd_state = saved_state;
			mutex_exit(&desc->pd_lock);
			/* Release reference held by kcf_prov_tab_lookup(). */
			KCF_PROV_REFRELE(desc);
			/*
			 * On seeing the busy return value, the administrator
			 * is expected to stop the clients, which removes the
			 * holds. A retry will then succeed.
			 */
			return (CRYPTO_BUSY);
		}
	}
	mutex_exit(&desc->pd_lock);

	if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
		remove_provider(desc);
	}

	if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/* remove the provider from the mechanisms tables */
		for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
		    mech_idx++) {
			kcf_remove_mech_provider(
			    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
		}
	}

	/* remove provider from providers table */
	if (kcf_prov_tab_rem_provider((crypto_provider_id_t)handle) !=
	    CRYPTO_SUCCESS) {
		/* Release reference held by kcf_prov_tab_lookup(). */
		KCF_PROV_REFRELE(desc);
		return (CRYPTO_UNKNOWN_PROVIDER);
	}

	delete_kstat(desc);

	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
		/* Release reference held by kcf_prov_tab_lookup(). */
		KCF_PROV_REFRELE(desc);

		/*
		 * Wait until the existing requests complete.
		 */
		mutex_enter(&desc->pd_lock);
		while (desc->pd_state != KCF_PROV_FREED)
			cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
		mutex_exit(&desc->pd_lock);
	} else {
		/*
		 * Wait until requests that have been sent to the provider
		 * complete.
		 */
		mutex_enter(&desc->pd_lock);
		while (desc->pd_irefcnt > 0)
			cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
		mutex_exit(&desc->pd_lock);
	}

	kcf_do_notify(desc, B_FALSE);

	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
		/*
		 * This is the only place where kcf_free_provider_desc()
		 * is called directly. KCF_PROV_REFRELE() should free the
		 * structure in all other places.
		 */
		ASSERT(desc->pd_state == KCF_PROV_FREED &&
		    desc->pd_refcnt == 0);
		kcf_free_provider_desc(desc);
	} else {
		KCF_PROV_REFRELE(desc);
	}

	return (CRYPTO_SUCCESS);
}

/*
 * This routine is used to notify the framework that the state of
 * a cryptographic provider has changed. Valid state codes are:
 *
 * CRYPTO_PROVIDER_READY
 *	The provider indicates that it can process more requests. A provider
 *	will notify with this event if it previously has notified us with a
 *	CRYPTO_PROVIDER_BUSY.
 *
 * CRYPTO_PROVIDER_BUSY
 *	The provider cannot take more requests.
 *
 * CRYPTO_PROVIDER_FAILED
 *	The provider encountered an internal error. The framework will not
 *	be sending any more requests to the provider. The provider may notify
 *	with a CRYPTO_PROVIDER_READY if it is able to recover from the error.
 *
 * This routine can be called from user or interrupt context.
 */
void
crypto_provider_notification(crypto_kcf_provider_handle_t handle, uint_t state)
{
	kcf_provider_desc_t *pd;

	/* lookup the provider from the given handle */
	if ((pd = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
		return;

	mutex_enter(&pd->pd_lock);

	if (pd->pd_state <= KCF_PROV_VERIFICATION_FAILED)
		goto out;

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		cmn_err(CE_WARN, "crypto_provider_notification: "
		    "logical provider (%x) ignored\n", handle);
		goto out;
	}
	switch (state) {
	case CRYPTO_PROVIDER_READY:
		switch (pd->pd_state) {
		case KCF_PROV_BUSY:
			pd->pd_state = KCF_PROV_READY;
			/*
			 * Signal the per-provider taskq threads that they
			 * can start submitting requests.
			 */
			cv_broadcast(&pd->pd_resume_cv);
			break;

		case KCF_PROV_FAILED:
			/*
			 * The provider recovered from the error. Let us
			 * use it now.
			 */
			pd->pd_state = KCF_PROV_READY;
			break;
		}
		break;

	case CRYPTO_PROVIDER_BUSY:
		switch (pd->pd_state) {
		case KCF_PROV_READY:
			pd->pd_state = KCF_PROV_BUSY;
			break;
		}
		break;

	case CRYPTO_PROVIDER_FAILED:
		/*
		 * We note the failure and return. The per-provider taskq
		 * threads check this flag and start failing the
		 * requests, if it is set. See process_req_hwp() for details.
		 */
		switch (pd->pd_state) {
		case KCF_PROV_READY:
			pd->pd_state = KCF_PROV_FAILED;
			break;

		case KCF_PROV_BUSY:
			pd->pd_state = KCF_PROV_FAILED;
			/*
			 * The per-provider taskq threads may be waiting. We
			 * signal them so that they can start failing requests.
			 */
			cv_broadcast(&pd->pd_resume_cv);
			break;
		}
		break;
	}
out:
	mutex_exit(&pd->pd_lock);
	KCF_PROV_REFRELE(pd);
}

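/*
 * A minimal sketch (hypothetical driver code, not part of this file) of how
 * a hardware provider is expected to use crypto_provider_notification():
 * report busy when its request queue fills and ready again once it drains.
 * The sc_* fields and sc_my_handle (the handle obtained from
 * crypto_register_provider()) are assumed driver soft-state names.
 *
 *	if (sc->sc_queued == sc->sc_queue_depth)
 *		crypto_provider_notification(sc->sc_my_handle,
 *		    CRYPTO_PROVIDER_BUSY);
 *	...
 *	if (sc->sc_queued < sc->sc_low_water)
 *		crypto_provider_notification(sc->sc_my_handle,
 *		    CRYPTO_PROVIDER_READY);
 */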
/*
 * This routine is used to notify the framework of the result of
 * an asynchronous request handled by a provider. Valid error
 * codes are the same as the CRYPTO_* errors defined in common.h.
 *
 * This routine can be called from user or interrupt context.
 */
void
crypto_op_notification(crypto_req_handle_t handle, int error)
{
	kcf_call_type_t ctype;

	if ((ctype = GET_REQ_TYPE(handle)) == CRYPTO_SYNCH) {
		kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)handle;

		if (error != CRYPTO_SUCCESS)
			sreq->sn_provider->pd_sched_info.ks_nfails++;
		KCF_PROV_IREFRELE(sreq->sn_provider);
		kcf_sop_done(sreq, error);
	} else {
		kcf_areq_node_t *areq = (kcf_areq_node_t *)handle;

		ASSERT(ctype == CRYPTO_ASYNCH);
		if (error != CRYPTO_SUCCESS)
			areq->an_provider->pd_sched_info.ks_nfails++;
		KCF_PROV_IREFRELE(areq->an_provider);
		kcf_aop_done(areq, error);
	}
}

/*
 * This routine is used by software providers to determine
 * whether to use KM_SLEEP or KM_NOSLEEP during memory allocation.
 * Note that hardware providers can always use KM_SLEEP. So,
 * they do not need to call this routine.
 *
 * This routine can be called from user or interrupt context.
 */
int
crypto_kmflag(crypto_req_handle_t handle)
{
	return (REQHNDL2_KMFLAG(handle));
}

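/*
 * A minimal sketch (hypothetical provider code, not part of this file) of
 * the intended crypto_kmflag() usage: a software provider passes the flag
 * straight to kmem_alloc() so that a request submitted in a context that
 * must not block does not sleep. "req" is the crypto_req_handle_t received
 * with the request and my_ctx_t is an assumed private context type.
 *
 *	ctx_private = kmem_alloc(sizeof (my_ctx_t), crypto_kmflag(req));
 *	if (ctx_private == NULL)
 *		return (CRYPTO_HOST_MEMORY);
 */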
/*
 * Process the mechanism info structures specified by the provider
 * during registration. A NULL crypto_provider_info_t indicates
 * an already initialized provider descriptor.
 *
 * Mechanisms are not added to the kernel's mechanism table if the
 * provider is a logical provider.
 *
 * Returns CRYPTO_SUCCESS on success, CRYPTO_ARGUMENTS_BAD if one
 * of the specified mechanisms was malformed, or CRYPTO_HOST_MEMORY
 * if the table of mechanisms is full.
 */
static int
init_prov_mechs(crypto_provider_info_t *info, kcf_provider_desc_t *desc)
{
	uint_t mech_idx;
	uint_t cleanup_idx;
	int err = CRYPTO_SUCCESS;
	kcf_prov_mech_desc_t *pmd;
	int desc_use_count = 0;
	int mcount = desc->pd_mech_list_count;

	if (desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		if (info != NULL) {
			ASSERT(info->pi_mechanisms != NULL);
			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
			    sizeof (crypto_mech_info_t) * mcount);
		}
		return (CRYPTO_SUCCESS);
	}

	/*
	 * Copy the mechanism list from the provider info to the provider
	 * descriptor. desc->pd_mechanisms has an extra crypto_mech_info_t
	 * element if the provider has random_ops since we keep an internal
	 * mechanism, SUN_RANDOM, in this case.
	 */
	if (info != NULL) {
		if (info->pi_ops_vector->co_random_ops != NULL) {
			crypto_mech_info_t *rand_mi;

			/*
			 * Need the following check as it is possible to have
			 * a provider that implements just random_ops and has
			 * pi_mechanisms == NULL.
			 */
			if (info->pi_mechanisms != NULL) {
				bcopy(info->pi_mechanisms, desc->pd_mechanisms,
				    sizeof (crypto_mech_info_t) * (mcount - 1));
			}
			rand_mi = &desc->pd_mechanisms[mcount - 1];

			bzero(rand_mi, sizeof (crypto_mech_info_t));
			(void) strncpy(rand_mi->cm_mech_name, SUN_RANDOM,
			    CRYPTO_MAX_MECH_NAME);
			rand_mi->cm_func_group_mask = CRYPTO_FG_RANDOM;
		} else {
			ASSERT(info->pi_mechanisms != NULL);
			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
			    sizeof (crypto_mech_info_t) * mcount);
		}
	}

	/*
	 * For each mechanism supported by the provider, add the provider
	 * to the corresponding KCF mechanism mech_entry chain.
	 */
	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count; mech_idx++) {
		crypto_mech_info_t *mi = &desc->pd_mechanisms[mech_idx];

		if ((mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BITS) &&
		    (mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)) {
			err = CRYPTO_ARGUMENTS_BAD;
			break;
		}

		if (desc->pd_flags & CRYPTO_HASH_NO_UPDATE &&
		    mi->cm_func_group_mask & CRYPTO_FG_DIGEST) {
			/*
			 * We ask the provider to specify the limit
			 * per hash mechanism. But, in practice, a
			 * hardware limitation means all hash mechanisms
			 * will have the same maximum size allowed for
			 * input data. So, we make it a per provider
			 * limit to keep it simple.
			 */
			if (mi->cm_max_input_length == 0) {
				err = CRYPTO_ARGUMENTS_BAD;
				break;
			} else {
				desc->pd_hash_limit = mi->cm_max_input_length;
			}
		}

		if (kcf_add_mech_provider(mech_idx, desc, &pmd) != KCF_SUCCESS)
			break;

		if (pmd == NULL)
			continue;

		/* The provider will be used for this mechanism */
		desc_use_count++;
	}

	/*
	 * Don't allow multiple software providers with disabled mechanisms
	 * to register. Subsequent enabling of mechanisms will result in
	 * an unsupported configuration, i.e. multiple software providers
	 * per mechanism.
	 */
	if (desc_use_count == 0 && desc->pd_prov_type == CRYPTO_SW_PROVIDER)
		return (CRYPTO_ARGUMENTS_BAD);

	if (err == KCF_SUCCESS)
		return (CRYPTO_SUCCESS);

	/*
	 * An error occurred while adding the mechanism; clean up
	 * and bail.
	 */
	for (cleanup_idx = 0; cleanup_idx < mech_idx; cleanup_idx++) {
		kcf_remove_mech_provider(
		    desc->pd_mechanisms[cleanup_idx].cm_mech_name, desc);
	}

	if (err == KCF_MECH_TAB_FULL)
		return (CRYPTO_HOST_MEMORY);

	return (CRYPTO_ARGUMENTS_BAD);
}

/*
 * Update routine for kstat. Only privileged users are allowed to
 * access this information, since it is sensitive: some cryptographic
 * attacks (e.g. traffic analysis) can make use of it.
 */
static int
kcf_prov_kstat_update(kstat_t *ksp, int rw)
{
	kcf_prov_stats_t *ks_data;
	kcf_provider_desc_t *pd = (kcf_provider_desc_t *)ksp->ks_private;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ks_data = ksp->ks_data;

	if (secpolicy_sys_config(CRED(), B_TRUE) != 0) {
		ks_data->ps_ops_total.value.ui64 = 0;
		ks_data->ps_ops_passed.value.ui64 = 0;
		ks_data->ps_ops_failed.value.ui64 = 0;
		ks_data->ps_ops_busy_rval.value.ui64 = 0;
	} else {
		ks_data->ps_ops_total.value.ui64 =
		    pd->pd_sched_info.ks_ndispatches;
		ks_data->ps_ops_failed.value.ui64 =
		    pd->pd_sched_info.ks_nfails;
		ks_data->ps_ops_busy_rval.value.ui64 =
		    pd->pd_sched_info.ks_nbusy_rval;
		ks_data->ps_ops_passed.value.ui64 =
		    pd->pd_sched_info.ks_ndispatches -
		    pd->pd_sched_info.ks_nfails -
		    pd->pd_sched_info.ks_nbusy_rval;
	}

	return (0);
}

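/*
 * Note: the per-provider counters above are exported as named kstats
 * (module "kcf", class "crypto", name ending in "_provider_stats"; see the
 * kstat_create() call in crypto_register_provider()). On a live system
 * they can typically be examined from userland with kstat(1M), e.g.
 * "kstat -m kcf -c crypto", subject to the privilege check performed in
 * kcf_prov_kstat_update().
 */
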
/*
 * Utility routine called from failure paths in crypto_register_provider()
 * and from crypto_load_soft_disabled().
 */
void
undo_register_provider(kcf_provider_desc_t *desc, boolean_t remove_prov)
{
	uint_t mech_idx;

	/* remove the provider from the mechanisms tables */
	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
	    mech_idx++) {
		kcf_remove_mech_provider(
		    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
	}

	/* remove provider from providers table */
	if (remove_prov)
		(void) kcf_prov_tab_rem_provider(desc->pd_prov_id);
}

static void
undo_register_provider_extra(kcf_provider_desc_t *desc)
{
	delete_kstat(desc);
	undo_register_provider(desc, B_TRUE);
}

/*
 * Utility routine called from crypto_load_soft_disabled(). Callers
 * should have done a prior undo_register_provider().
 */
void
redo_register_provider(kcf_provider_desc_t *pd)
{
	/* process the mechanisms supported by the provider */
	(void) init_prov_mechs(NULL, pd);

	/*
	 * Hold provider in providers table. We should not call
	 * kcf_prov_tab_add_provider() here as the provider descriptor
	 * is still valid which means it has an entry in the provider
	 * table.
	 */
	KCF_PROV_REFHOLD(pd);
	KCF_PROV_IREFHOLD(pd);
}

/*
 * Add provider (p1) to another provider's array of providers (p2).
 * Hardware and logical providers use this array to cross-reference
 * each other.
 */
static void
add_provider_to_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
{
	kcf_provider_list_t *new;

	new = kmem_alloc(sizeof (kcf_provider_list_t), KM_SLEEP);
	mutex_enter(&p2->pd_lock);
	new->pl_next = p2->pd_provider_list;
	p2->pd_provider_list = new;
	KCF_PROV_IREFHOLD(p1);
	new->pl_provider = p1;
	mutex_exit(&p2->pd_lock);
}

/*
 * Remove provider (p1) from another provider's array of providers (p2).
 * Hardware and logical providers use this array to cross-reference
 * each other.
 */
static void
remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
{
	kcf_provider_list_t *pl = NULL, **prev;

	mutex_enter(&p2->pd_lock);
	for (pl = p2->pd_provider_list, prev = &p2->pd_provider_list;
	    pl != NULL; prev = &pl->pl_next, pl = pl->pl_next) {
		if (pl->pl_provider == p1) {
			break;
		}
	}

	if (pl == NULL) {
		/* p1 was not found in p2's list; nothing to detach */
		mutex_exit(&p2->pd_lock);
		return;
	}

	/* detach and free kcf_provider_list structure */
	KCF_PROV_IREFRELE(p1);
	*prev = pl->pl_next;
	kmem_free(pl, sizeof (*pl));
	mutex_exit(&p2->pd_lock);
}

/*
 * Convert an array of logical provider handles (crypto_provider_id)
 * stored in a crypto_provider_info structure into an array of provider
 * descriptors (kcf_provider_desc_t) attached to a logical provider.
 */
static void
process_logical_providers(crypto_provider_info_t *info, kcf_provider_desc_t *hp)
{
	kcf_provider_desc_t *lp;
	crypto_provider_id_t handle;
	int count = info->pi_logical_provider_count;
	int i;

	/* add hardware provider to each logical provider */
	for (i = 0; i < count; i++) {
		handle = info->pi_logical_providers[i];
		lp = kcf_prov_tab_lookup((crypto_provider_id_t)handle);
		if (lp == NULL) {
			continue;
		}
		add_provider_to_array(hp, lp);
		hp->pd_flags |= KCF_LPROV_MEMBER;

		/*
		 * A hardware provider has to have the provider descriptor of
		 * every logical provider it belongs to, so it can be removed
		 * from the logical provider if the hardware provider
		 * unregisters from the framework.
		 */
		add_provider_to_array(lp, hp);
		KCF_PROV_REFRELE(lp);
	}
}

/*
 * This routine removes a provider from all of the logical or
 * hardware providers it belongs to, and frees the provider's
 * array of pointers to providers.
 */
static void
remove_provider(kcf_provider_desc_t *pp)
{
	kcf_provider_desc_t *p;
	kcf_provider_list_t *e, *next;

	mutex_enter(&pp->pd_lock);
	for (e = pp->pd_provider_list; e != NULL; e = next) {
		p = e->pl_provider;
		remove_provider_from_array(pp, p);
		if (p->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    p->pd_provider_list == NULL)
			p->pd_flags &= ~KCF_LPROV_MEMBER;
		KCF_PROV_IREFRELE(p);
		next = e->pl_next;
		kmem_free(e, sizeof (*e));
	}
	pp->pd_provider_list = NULL;
	mutex_exit(&pp->pd_lock);
}

/*
 * Dispatch events as needed for a provider. The is_added flag tells
 * whether the provider is registering or unregistering.
 */
void
kcf_do_notify(kcf_provider_desc_t *prov_desc, boolean_t is_added)
{
	int i;
	crypto_notify_event_change_t ec;

	ASSERT(prov_desc->pd_state > KCF_PROV_VERIFICATION_FAILED);

	/*
	 * Inform interested clients of the mechanisms becoming
	 * available/unavailable. We skip this for logical providers
	 * as they do not affect mechanisms.
	 */
	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		ec.ec_provider_type = prov_desc->pd_prov_type;
		ec.ec_change = is_added ? CRYPTO_MECH_ADDED :
		    CRYPTO_MECH_REMOVED;
		for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
			/* Skip any mechanisms not allowed by the policy */
			if (is_mech_disabled(prov_desc,
			    prov_desc->pd_mechanisms[i].cm_mech_name))
				continue;

			(void) strncpy(ec.ec_mech_name,
			    prov_desc->pd_mechanisms[i].cm_mech_name,
			    CRYPTO_MAX_MECH_NAME);
			kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED, &ec);
		}
	}

	/*
	 * Inform interested clients about the new or departing provider.
	 * In the case of a logical provider, we send the event only for
	 * the logical provider itself and not for the underlying providers,
	 * which are identified by the KCF_LPROV_MEMBER bit.
	 */
	if (prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER ||
	    (prov_desc->pd_flags & KCF_LPROV_MEMBER) == 0) {
		kcf_walk_ntfylist(is_added ? CRYPTO_EVENT_PROVIDER_REGISTERED :
		    CRYPTO_EVENT_PROVIDER_UNREGISTERED, prov_desc);
	}
}

static void
delete_kstat(kcf_provider_desc_t *desc)
{
	/* destroy the kstat created for this provider */
	if (desc->pd_kstat != NULL) {
		kcf_provider_desc_t *kspd = desc->pd_kstat->ks_private;

		/* release reference held by desc->pd_kstat->ks_private */
		ASSERT(desc == kspd);
		kstat_delete(kspd->pd_kstat);
		desc->pd_kstat = NULL;
		KCF_PROV_REFRELE(kspd);
		KCF_PROV_IREFRELE(kspd);
	}
}