1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2007 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * This file contains routines which call into a provider's 30 * entry points and do other related work. 31 */ 32 33 #include <sys/types.h> 34 #include <sys/systm.h> 35 #include <sys/taskq_impl.h> 36 #include <sys/cmn_err.h> 37 38 #include <sys/crypto/common.h> 39 #include <sys/crypto/impl.h> 40 #include <sys/crypto/sched_impl.h> 41 42 /* 43 * Return B_TRUE if the specified entry point is NULL. We rely on the 44 * caller to provide, with offset_1 and offset_2, information to calculate 45 * the location of the entry point. The ops argument is a temporary local 46 * variable defined as caddr_t *. 
47 */ 48 #define KCF_PROV_NULL_ENTRY_POINT(pd, o1, o2, ops) \ 49 (ops = (caddr_t *)((caddr_t)(pd)->pd_ops_vector + (o1)), \ 50 (*ops == NULL || *(caddr_t *)((caddr_t)(*ops) + (o2)) == NULL)) 51 52 53 static int kcf_emulate_dual(kcf_provider_desc_t *, crypto_ctx_t *, 54 kcf_req_params_t *); 55 void 56 kcf_free_triedlist(kcf_prov_tried_t *list) 57 { 58 kcf_prov_tried_t *l; 59 60 while ((l = list) != NULL) { 61 list = list->pt_next; 62 KCF_PROV_REFRELE(l->pt_pd); 63 kmem_free(l, sizeof (kcf_prov_tried_t)); 64 } 65 } 66 67 kcf_prov_tried_t * 68 kcf_insert_triedlist(kcf_prov_tried_t **list, kcf_provider_desc_t *pd, 69 int kmflag) 70 { 71 kcf_prov_tried_t *l; 72 73 l = kmem_alloc(sizeof (kcf_prov_tried_t), kmflag); 74 if (l == NULL) 75 return (NULL); 76 77 l->pt_pd = pd; 78 l->pt_next = *list; 79 *list = l; 80 81 return (l); 82 } 83 84 static boolean_t 85 is_in_triedlist(kcf_provider_desc_t *pd, kcf_prov_tried_t *triedl) 86 { 87 while (triedl != NULL) { 88 if (triedl->pt_pd == pd) 89 return (B_TRUE); 90 triedl = triedl->pt_next; 91 }; 92 93 return (B_FALSE); 94 } 95 96 /* 97 * Search a mech entry's hardware provider list for the specified 98 * provider. Return true if found. 99 */ 100 static boolean_t 101 is_valid_provider_for_mech(kcf_provider_desc_t *pd, kcf_mech_entry_t *me, 102 crypto_func_group_t fg) 103 { 104 kcf_prov_mech_desc_t *prov_chain; 105 106 prov_chain = me->me_hw_prov_chain; 107 if (prov_chain != NULL) { 108 ASSERT(me->me_num_hwprov > 0); 109 for (; prov_chain != NULL; prov_chain = prov_chain->pm_next) { 110 if (prov_chain->pm_prov_desc == pd && 111 IS_FG_SUPPORTED(prov_chain, fg)) { 112 return (B_TRUE); 113 } 114 } 115 } 116 return (B_FALSE); 117 } 118 119 /* 120 * This routine, given a logical provider, returns the least loaded 121 * provider belonging to the logical provider. The provider must be 122 * able to do the specified mechanism, i.e. check that the mechanism 123 * hasn't been disabled. 
 * In addition, the routine verifies, via is_valid_provider_for_mech(),
 * that the candidate real provider still exports the mechanism for the
 * requested function group fg, and -- when mech_type_2 is not
 * CRYPTO_MECH_INVALID -- that it also supports that second mechanism.
 *
 * On success, CRYPTO_SUCCESS is returned and *new is set to the chosen
 * provider with a reference held; on failure an error code is returned
 * and *new is set to NULL.
 */
int
kcf_get_hardware_provider(crypto_mech_type_t mech_type_1,
    crypto_mech_type_t mech_type_2, boolean_t call_restrict,
    kcf_provider_desc_t *old, kcf_provider_desc_t **new, crypto_func_group_t fg)
{
	kcf_provider_desc_t *provider, *real_pd = old;
	kcf_provider_desc_t *gpd = NULL;	/* good provider */
	kcf_provider_desc_t *bpd = NULL;	/* busy provider */
	kcf_provider_list_t *p;
	kcf_ops_class_t class;
	kcf_mech_entry_t *me;
	kcf_mech_entry_tab_t *me_tab;
	int index, len, gqlen = INT_MAX, rv = CRYPTO_SUCCESS;

	/* get the mech entry for the specified mechanism */
	class = KCF_MECH2CLASS(mech_type_1);
	if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
		return (CRYPTO_MECHANISM_INVALID);
	}

	me_tab = &kcf_mech_tabs_tab[class];
	index = KCF_MECH2INDEX(mech_type_1);
	if ((index < 0) || (index >= me_tab->met_size)) {
		return (CRYPTO_MECHANISM_INVALID);
	}

	me = &((me_tab->met_tab)[index]);
	/* me_mutex is held for the rest of the routine; see 'out' label */
	mutex_enter(&me->me_mutex);

	/*
	 * We assume the provider descriptor will not go away because
	 * it is being held somewhere, i.e. its reference count has been
	 * incremented. In the case of the crypto module, the provider
	 * descriptor is held by the session structure.
	 */
	if (old->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		if (old->pd_provider_list == NULL) {
			real_pd = NULL;
			rv = CRYPTO_DEVICE_ERROR;
			goto out;
		}
		/*
		 * Find the least loaded real provider. KCF_PROV_LOAD gives
		 * the load (number of pending requests) of the provider.
		 */
		mutex_enter(&old->pd_lock);
		p = old->pd_provider_list;
		while (p != NULL) {
			provider = p->pl_provider;

			/* members of a logical provider are always real */
			ASSERT(provider->pd_prov_type !=
			    CRYPTO_LOGICAL_PROVIDER);

			/* skip restricted providers if the caller asked to */
			if (call_restrict &&
			    (provider->pd_flags & KCF_PROV_RESTRICTED)) {
				p = p->pl_next;
				continue;
			}

			/* mechanism must not be disabled for this member */
			if (!is_valid_provider_for_mech(provider, me, fg)) {
				p = p->pl_next;
				continue;
			}

			/* provider does second mech */
			if (mech_type_2 != CRYPTO_MECH_INVALID) {
				int i;

				i = KCF_TO_PROV_MECH_INDX(provider,
				    mech_type_2);
				if (i == KCF_INVALID_INDX) {
					p = p->pl_next;
					continue;
				}
			}

			if (provider->pd_state != KCF_PROV_READY) {
				/* choose BUSY if no READY providers */
				if (provider->pd_state == KCF_PROV_BUSY)
					bpd = provider;
				p = p->pl_next;
				continue;
			}

			/* track the least-loaded READY provider */
			len = KCF_PROV_LOAD(provider);
			if (len < gqlen) {
				gqlen = len;
				gpd = provider;
			}

			p = p->pl_next;
		}

		/* prefer a READY provider; fall back to a BUSY one */
		if (gpd != NULL) {
			real_pd = gpd;
			KCF_PROV_REFHOLD(real_pd);
		} else if (bpd != NULL) {
			real_pd = bpd;
			KCF_PROV_REFHOLD(real_pd);
		} else {
			/* can't find provider */
			real_pd = NULL;
			rv = CRYPTO_MECHANISM_INVALID;
		}
		mutex_exit(&old->pd_lock);

	} else {
		/* 'old' is a real provider; just validate it */
		if (!KCF_IS_PROV_USABLE(old) ||
		    (call_restrict && (old->pd_flags & KCF_PROV_RESTRICTED))) {
			real_pd = NULL;
			rv = CRYPTO_DEVICE_ERROR;
			goto out;
		}

		if (!is_valid_provider_for_mech(old, me, fg)) {
			real_pd = NULL;
			rv = CRYPTO_MECHANISM_INVALID;
			goto out;
		}

		/* real_pd == old here; hand back a held reference */
		KCF_PROV_REFHOLD(real_pd);
	}
out:
	mutex_exit(&me->me_mutex);
	*new = real_pd;
	return (rv);
}

/*
 * This routine, given a logical provider, returns the least loaded
 * provider belonging to the logical provider. Just in case providers
 * are not entirely equivalent, the provider's entry point is checked
 * for non-nullness.
 * This is accomplished by having the caller pass, as
 * arguments, the offset of the function group (offset_1), and the
 * offset of the function within the function group (offset_2).
 * Returns NULL if no provider can be found.
 *
 * Unlike kcf_get_hardware_provider(), no mechanism is involved here,
 * so no mech-table lookup or me_mutex is needed. On success, *new is
 * set to the chosen provider with a reference held.
 */
int
kcf_get_hardware_provider_nomech(offset_t offset_1, offset_t offset_2,
    boolean_t call_restrict, kcf_provider_desc_t *old,
    kcf_provider_desc_t **new)
{
	kcf_provider_desc_t *provider, *real_pd = old;
	kcf_provider_desc_t *gpd = NULL;	/* good provider */
	kcf_provider_desc_t *bpd = NULL;	/* busy provider */
	kcf_provider_list_t *p;
	caddr_t *ops;
	int len, gqlen = INT_MAX, rv = CRYPTO_SUCCESS;

	/*
	 * We assume the provider descriptor will not go away because
	 * it is being held somewhere, i.e. its reference count has been
	 * incremented. In the case of the crypto module, the provider
	 * descriptor is held by the session structure.
	 */
	if (old->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		if (old->pd_provider_list == NULL) {
			real_pd = NULL;
			rv = CRYPTO_DEVICE_ERROR;
			goto out;
		}
		/*
		 * Find the least loaded real provider. KCF_PROV_LOAD gives
		 * the load (number of pending requests) of the provider.
		 */
		mutex_enter(&old->pd_lock);
		p = old->pd_provider_list;
		while (p != NULL) {
			provider = p->pl_provider;

			/* members of a logical provider are always real */
			ASSERT(provider->pd_prov_type !=
			    CRYPTO_LOGICAL_PROVIDER);

			/* skip restricted providers if the caller asked to */
			if (call_restrict &&
			    (provider->pd_flags & KCF_PROV_RESTRICTED)) {
				p = p->pl_next;
				continue;
			}
			/* provider must actually implement this entry point */
			if (KCF_PROV_NULL_ENTRY_POINT(provider, offset_1,
			    offset_2, ops)) {
				p = p->pl_next;
				continue;
			}

			if (provider->pd_state != KCF_PROV_READY) {
				/* choose BUSY if no READY providers */
				if (provider->pd_state == KCF_PROV_BUSY)
					bpd = provider;
				p = p->pl_next;
				continue;
			}

			/* track the least-loaded READY provider */
			len = KCF_PROV_LOAD(provider);
			if (len < gqlen) {
				gqlen = len;
				gpd = provider;
			}

			p = p->pl_next;
		}
		mutex_exit(&old->pd_lock);

		/*
		 * NOTE(review): the REFHOLD on gpd/bpd below happens after
		 * pd_lock is dropped, unlike kcf_get_hardware_provider()
		 * which holds the reference inside the lock. This relies on
		 * the member descriptors staying valid across the gap --
		 * confirm that the logical provider keeps its members held.
		 */
		if (gpd != NULL) {
			real_pd = gpd;
			KCF_PROV_REFHOLD(real_pd);
		} else if (bpd != NULL) {
			real_pd = bpd;
			KCF_PROV_REFHOLD(real_pd);
		} else {
			/* can't find provider */
			real_pd = NULL;
			rv = CRYPTO_DEVICE_ERROR;
		}

	} else {
		/* 'old' is a real provider; just validate it */
		if (!KCF_IS_PROV_USABLE(old) ||
		    (call_restrict && (old->pd_flags & KCF_PROV_RESTRICTED))) {
			real_pd = NULL;
			rv = CRYPTO_DEVICE_ERROR;
			goto out;
		}

		if (KCF_PROV_NULL_ENTRY_POINT(old, offset_1, offset_2, ops)) {
			real_pd = NULL;
			rv = CRYPTO_NOT_SUPPORTED;
			goto out;
		}
		/* real_pd == old here; hand back a held reference */
		KCF_PROV_REFHOLD(real_pd);
	}
out:
	*new = real_pd;
	return (rv);
}

/*
 * Return the next member of a logical provider, given the previous
 * member. The function returns true if the next member is found and
 * bumps its refcnt before returning.
368 */ 369 boolean_t 370 kcf_get_next_logical_provider_member(kcf_provider_desc_t *logical_provider, 371 kcf_provider_desc_t *prev, kcf_provider_desc_t **pd) 372 { 373 kcf_provider_list_t *p; 374 kcf_provider_desc_t *next; 375 376 ASSERT(MUTEX_HELD(&logical_provider->pd_lock)); 377 p = logical_provider->pd_provider_list; 378 while (p != NULL) { 379 /* start the search */ 380 if (prev == NULL) { 381 next = p->pl_provider; 382 goto found; 383 } else { 384 /* find where we were before */ 385 if (p->pl_provider == prev) { 386 if (p->pl_next != NULL) { 387 next = p->pl_next->pl_provider; 388 goto found; 389 } 390 } 391 } 392 p = p->pl_next; 393 } 394 return (B_FALSE); 395 396 found: 397 KCF_PROV_REFHOLD(next); 398 *pd = next; 399 return (B_TRUE); 400 } 401 402 /* 403 * Return the best provider for the specified mechanism. The provider 404 * is held and it is the caller's responsibility to release it when done. 405 * The fg input argument is used as a search criterion to pick a provider. 406 * A provider has to support this function group to be picked. 407 * 408 * Find the least loaded provider in the list of providers. We do a linear 409 * search to find one. This is fine as we assume there are only a few 410 * number of providers in this list. If this assumption ever changes, 411 * we should revisit this. 412 * 413 * call_restrict represents if the caller should not be allowed to 414 * use restricted providers. 
 */
kcf_provider_desc_t *
kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp,
    int *error, kcf_prov_tried_t *triedl, crypto_func_group_t fg,
    boolean_t call_restrict, size_t data_size)
{
	kcf_provider_desc_t *pd = NULL, *gpd = NULL;
	kcf_prov_mech_desc_t *prov_chain, *mdesc;
	int len, gqlen = INT_MAX;
	kcf_ops_class_t class;
	int index;
	kcf_mech_entry_t *me;
	kcf_mech_entry_tab_t *me_tab;

	/* locate the mech entry for the specified mechanism */
	class = KCF_MECH2CLASS(mech_type);
	if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
		*error = CRYPTO_MECHANISM_INVALID;
		return (NULL);
	}

	me_tab = &kcf_mech_tabs_tab[class];
	index = KCF_MECH2INDEX(mech_type);
	if ((index < 0) || (index >= me_tab->met_size)) {
		*error = CRYPTO_MECHANISM_INVALID;
		return (NULL);
	}

	me = &((me_tab->met_tab)[index]);
	if (mepp != NULL)
		*mepp = me;

	mutex_enter(&me->me_mutex);

	prov_chain = me->me_hw_prov_chain;

	/*
	 * We check for the threshold for using a hardware provider for
	 * this amount of data. If there is no software provider available
	 * for the mechanism, then the threshold is ignored.
	 */
	if ((prov_chain != NULL) &&
	    ((data_size == 0) || (me->me_threshold == 0) ||
	    (data_size >= me->me_threshold) ||
	    ((mdesc = me->me_sw_prov) == NULL) ||
	    (!IS_FG_SUPPORTED(mdesc, fg)) ||
	    (!KCF_IS_PROV_USABLE(mdesc->pm_prov_desc)))) {
		ASSERT(me->me_num_hwprov > 0);
		/* there is at least one provider */

		/*
		 * Find the least loaded real provider. KCF_PROV_LOAD gives
		 * the load (number of pending requests) of the provider.
		 */
		while (prov_chain != NULL) {
			pd = prov_chain->pm_prov_desc;

			/* skip unusable, already-tried or restricted ones */
			if (!IS_FG_SUPPORTED(prov_chain, fg) ||
			    !KCF_IS_PROV_USABLE(pd) ||
			    IS_PROVIDER_TRIED(pd, triedl) ||
			    (call_restrict &&
			    (pd->pd_flags & KCF_PROV_RESTRICTED))) {
				prov_chain = prov_chain->pm_next;
				continue;
			}

			if ((len = KCF_PROV_LOAD(pd)) < gqlen) {
				gqlen = len;
				gpd = pd;
			}

			prov_chain = prov_chain->pm_next;
		}

		/* gpd is NULL if the loop found no usable HW provider */
		pd = gpd;
	}

	/* No HW provider for this mech, is there a SW provider? */
	if (pd == NULL && (mdesc = me->me_sw_prov) != NULL) {
		pd = mdesc->pm_prov_desc;
		if (!IS_FG_SUPPORTED(mdesc, fg) ||
		    !KCF_IS_PROV_USABLE(pd) ||
		    IS_PROVIDER_TRIED(pd, triedl) ||
		    (call_restrict && (pd->pd_flags & KCF_PROV_RESTRICTED)))
			pd = NULL;
	}

	if (pd == NULL) {
		/*
		 * We do not want to report CRYPTO_MECH_NOT_SUPPORTED, when
		 * we are in the "fallback to the next provider" case. Rather
		 * we preserve the error, so that the client gets the right
		 * error code.
		 */
		if (triedl == NULL)
			*error = CRYPTO_MECH_NOT_SUPPORTED;
	} else
		KCF_PROV_REFHOLD(pd);

	mutex_exit(&me->me_mutex);
	return (pd);
}

/*
 * Very similar to kcf_get_mech_provider(). Finds the best provider capable of
 * a dual operation with both me1 and me2.
 * When no dual-ops capable providers are available, return the best provider
 * for me1 only, and sets *prov_mt2 to CRYPTO_MECH_INVALID;
 * We assume/expect that a slower HW capable of the dual is still
 * faster than the 2 fastest providers capable of the individual ops
 * separately.
 */
kcf_provider_desc_t *
kcf_get_dual_provider(crypto_mechanism_t *mech1, crypto_mechanism_t *mech2,
    kcf_mech_entry_t **mepp, crypto_mech_type_t *prov_mt1,
    crypto_mech_type_t *prov_mt2, int *error, kcf_prov_tried_t *triedl,
    crypto_func_group_t fg1, crypto_func_group_t fg2, boolean_t call_restrict,
    size_t data_size)
{
	kcf_provider_desc_t *pd = NULL, *pdm1 = NULL, *pdm1m2 = NULL;
	kcf_prov_mech_desc_t *prov_chain, *mdesc;
	int len, gqlen = INT_MAX, dgqlen = INT_MAX;
	crypto_mech_info_list_t *mil;
	crypto_mech_type_t m2id = mech2->cm_type;
	kcf_mech_entry_t *me;

	/* when mech is a valid mechanism, me will be its mech_entry */
	if (kcf_get_mech_entry(mech1->cm_type, &me) != KCF_SUCCESS) {
		*error = CRYPTO_MECHANISM_INVALID;
		return (NULL);
	}

	/* assume no dual capability until a dual-capable provider is found */
	*prov_mt2 = CRYPTO_MECH_INVALID;

	if (mepp != NULL)
		*mepp = me;
	mutex_enter(&me->me_mutex);

	prov_chain = me->me_hw_prov_chain;
	/*
	 * We check the threshold for using a hardware provider for
	 * this amount of data. If there is no software provider available
	 * for the first mechanism, then the threshold is ignored.
	 */
	if ((prov_chain != NULL) &&
	    ((data_size == 0) || (me->me_threshold == 0) ||
	    (data_size >= me->me_threshold) ||
	    ((mdesc = me->me_sw_prov) == NULL) ||
	    (!IS_FG_SUPPORTED(mdesc, fg1)) ||
	    (!KCF_IS_PROV_USABLE(mdesc->pm_prov_desc)))) {
		/* there is at least one provider */
		ASSERT(me->me_num_hwprov > 0);

		/*
		 * Find the least loaded provider capable of the combo
		 * me1 + me2, and save a pointer to the least loaded
		 * provider capable of me1 only.
		 */
		while (prov_chain != NULL) {
			pd = prov_chain->pm_prov_desc;
			len = KCF_PROV_LOAD(pd);

			/* skip unusable, already-tried or restricted ones */
			if (!IS_FG_SUPPORTED(prov_chain, fg1) ||
			    !KCF_IS_PROV_USABLE(pd) ||
			    IS_PROVIDER_TRIED(pd, triedl) ||
			    (call_restrict &&
			    (pd->pd_flags & KCF_PROV_RESTRICTED))) {
				prov_chain = prov_chain->pm_next;
				continue;
			}

			/* Save the best provider capable of m1 */
			if (len < gqlen) {
				*prov_mt1 =
				    prov_chain->pm_mech_info.cm_mech_number;
				gqlen = len;
				pdm1 = pd;
			}

			/* See if pd can do me2 too */
			for (mil = prov_chain->pm_mi_list;
			    mil != NULL; mil = mil->ml_next) {
				if ((mil->ml_mech_info.cm_func_group_mask &
				    fg2) == 0)
					continue;

				if ((mil->ml_kcf_mechid == m2id) &&
				    (len < dgqlen)) {
					/* Bingo! */
					dgqlen = len;
					pdm1m2 = pd;
					*prov_mt2 =
					    mil->ml_mech_info.cm_mech_number;
					*prov_mt1 = prov_chain->
					    pm_mech_info.cm_mech_number;
					break;
				}
			}

			prov_chain = prov_chain->pm_next;
		}

		/* prefer the dual-capable provider over the m1-only one */
		pd = (pdm1m2 != NULL) ? pdm1m2 : pdm1;
	}

	/* no HW provider for this mech, is there a SW provider? */
	if (pd == NULL && (mdesc = me->me_sw_prov) != NULL) {
		pd = mdesc->pm_prov_desc;
		if (!IS_FG_SUPPORTED(mdesc, fg1) ||
		    !KCF_IS_PROV_USABLE(pd) ||
		    IS_PROVIDER_TRIED(pd, triedl) ||
		    (call_restrict && (pd->pd_flags & KCF_PROV_RESTRICTED)))
			pd = NULL;
		else {
			/* See if pd can do me2 too */
			for (mil = me->me_sw_prov->pm_mi_list;
			    mil != NULL; mil = mil->ml_next) {
				if ((mil->ml_mech_info.cm_func_group_mask &
				    fg2) == 0)
					continue;

				if (mil->ml_kcf_mechid == m2id) {
					/* Bingo! */
					*prov_mt2 =
					    mil->ml_mech_info.cm_mech_number;
					break;
				}
			}
			*prov_mt1 = me->me_sw_prov->pm_mech_info.cm_mech_number;
		}
	}

	if (pd == NULL)
		*error = CRYPTO_MECH_NOT_SUPPORTED;
	else
		KCF_PROV_REFHOLD(pd);

	mutex_exit(&me->me_mutex);
	return (pd);
}

/*
 * Do the actual work of calling the provider routines.
 *
 * pd - Provider structure
 * ctx - Context for this operation
 * params - Parameters for this operation
 * rhndl - Request handle to use for notification
 *
 * The return values are the same as that of the respective SPI.
 */
int
common_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
    kcf_req_params_t *params, crypto_req_handle_t rhndl)
{
	int err = CRYPTO_ARGUMENTS_BAD;
	kcf_op_type_t optype;

	optype = params->rp_optype;

	switch (params->rp_opgrp) {
	case KCF_OG_DIGEST: {
		kcf_digest_ops_params_t *dops = &params->rp_u.digest_params;

		switch (optype) {
		case KCF_OP_INIT:
			/*
			 * We should do this only here and not in KCF_WRAP_*
			 * macros. This is because we may want to try other
			 * providers, in case we recover from a failure.
684 */ 685 KCF_SET_PROVIDER_MECHNUM(dops->do_framework_mechtype, 686 pd, &dops->do_mech); 687 688 err = KCF_PROV_DIGEST_INIT(pd, ctx, &dops->do_mech, 689 rhndl); 690 break; 691 692 case KCF_OP_SINGLE: 693 err = KCF_PROV_DIGEST(pd, ctx, dops->do_data, 694 dops->do_digest, rhndl); 695 break; 696 697 case KCF_OP_UPDATE: 698 err = KCF_PROV_DIGEST_UPDATE(pd, ctx, 699 dops->do_data, rhndl); 700 break; 701 702 case KCF_OP_FINAL: 703 err = KCF_PROV_DIGEST_FINAL(pd, ctx, 704 dops->do_digest, rhndl); 705 break; 706 707 case KCF_OP_ATOMIC: 708 ASSERT(ctx == NULL); 709 KCF_SET_PROVIDER_MECHNUM(dops->do_framework_mechtype, 710 pd, &dops->do_mech); 711 err = KCF_PROV_DIGEST_ATOMIC(pd, dops->do_sid, 712 &dops->do_mech, dops->do_data, dops->do_digest, 713 rhndl); 714 break; 715 716 case KCF_OP_DIGEST_KEY: 717 err = KCF_PROV_DIGEST_KEY(pd, ctx, dops->do_digest_key, 718 rhndl); 719 break; 720 721 default: 722 break; 723 } 724 break; 725 } 726 727 case KCF_OG_MAC: { 728 kcf_mac_ops_params_t *mops = ¶ms->rp_u.mac_params; 729 730 switch (optype) { 731 case KCF_OP_INIT: 732 KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype, 733 pd, &mops->mo_mech); 734 735 err = KCF_PROV_MAC_INIT(pd, ctx, &mops->mo_mech, 736 mops->mo_key, mops->mo_templ, rhndl); 737 break; 738 739 case KCF_OP_SINGLE: 740 err = KCF_PROV_MAC(pd, ctx, mops->mo_data, 741 mops->mo_mac, rhndl); 742 break; 743 744 case KCF_OP_UPDATE: 745 err = KCF_PROV_MAC_UPDATE(pd, ctx, mops->mo_data, 746 rhndl); 747 break; 748 749 case KCF_OP_FINAL: 750 err = KCF_PROV_MAC_FINAL(pd, ctx, mops->mo_mac, rhndl); 751 break; 752 753 case KCF_OP_ATOMIC: 754 ASSERT(ctx == NULL); 755 KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype, 756 pd, &mops->mo_mech); 757 758 err = KCF_PROV_MAC_ATOMIC(pd, mops->mo_sid, 759 &mops->mo_mech, mops->mo_key, mops->mo_data, 760 mops->mo_mac, mops->mo_templ, rhndl); 761 break; 762 763 case KCF_OP_MAC_VERIFY_ATOMIC: 764 ASSERT(ctx == NULL); 765 KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype, 766 pd, 
&mops->mo_mech); 767 768 err = KCF_PROV_MAC_VERIFY_ATOMIC(pd, mops->mo_sid, 769 &mops->mo_mech, mops->mo_key, mops->mo_data, 770 mops->mo_mac, mops->mo_templ, rhndl); 771 break; 772 773 default: 774 break; 775 } 776 break; 777 } 778 779 case KCF_OG_ENCRYPT: { 780 kcf_encrypt_ops_params_t *eops = ¶ms->rp_u.encrypt_params; 781 782 switch (optype) { 783 case KCF_OP_INIT: 784 KCF_SET_PROVIDER_MECHNUM(eops->eo_framework_mechtype, 785 pd, &eops->eo_mech); 786 787 err = KCF_PROV_ENCRYPT_INIT(pd, ctx, &eops->eo_mech, 788 eops->eo_key, eops->eo_templ, rhndl); 789 break; 790 791 case KCF_OP_SINGLE: 792 err = KCF_PROV_ENCRYPT(pd, ctx, eops->eo_plaintext, 793 eops->eo_ciphertext, rhndl); 794 break; 795 796 case KCF_OP_UPDATE: 797 err = KCF_PROV_ENCRYPT_UPDATE(pd, ctx, 798 eops->eo_plaintext, eops->eo_ciphertext, rhndl); 799 break; 800 801 case KCF_OP_FINAL: 802 err = KCF_PROV_ENCRYPT_FINAL(pd, ctx, 803 eops->eo_ciphertext, rhndl); 804 break; 805 806 case KCF_OP_ATOMIC: 807 ASSERT(ctx == NULL); 808 KCF_SET_PROVIDER_MECHNUM(eops->eo_framework_mechtype, 809 pd, &eops->eo_mech); 810 811 err = KCF_PROV_ENCRYPT_ATOMIC(pd, eops->eo_sid, 812 &eops->eo_mech, eops->eo_key, eops->eo_plaintext, 813 eops->eo_ciphertext, eops->eo_templ, rhndl); 814 break; 815 816 default: 817 break; 818 } 819 break; 820 } 821 822 case KCF_OG_DECRYPT: { 823 kcf_decrypt_ops_params_t *dcrops = ¶ms->rp_u.decrypt_params; 824 825 switch (optype) { 826 case KCF_OP_INIT: 827 KCF_SET_PROVIDER_MECHNUM(dcrops->dop_framework_mechtype, 828 pd, &dcrops->dop_mech); 829 830 err = KCF_PROV_DECRYPT_INIT(pd, ctx, &dcrops->dop_mech, 831 dcrops->dop_key, dcrops->dop_templ, rhndl); 832 break; 833 834 case KCF_OP_SINGLE: 835 err = KCF_PROV_DECRYPT(pd, ctx, dcrops->dop_ciphertext, 836 dcrops->dop_plaintext, rhndl); 837 break; 838 839 case KCF_OP_UPDATE: 840 err = KCF_PROV_DECRYPT_UPDATE(pd, ctx, 841 dcrops->dop_ciphertext, dcrops->dop_plaintext, 842 rhndl); 843 break; 844 845 case KCF_OP_FINAL: 846 err = KCF_PROV_DECRYPT_FINAL(pd, 
ctx, 847 dcrops->dop_plaintext, rhndl); 848 break; 849 850 case KCF_OP_ATOMIC: 851 ASSERT(ctx == NULL); 852 KCF_SET_PROVIDER_MECHNUM(dcrops->dop_framework_mechtype, 853 pd, &dcrops->dop_mech); 854 855 err = KCF_PROV_DECRYPT_ATOMIC(pd, dcrops->dop_sid, 856 &dcrops->dop_mech, dcrops->dop_key, 857 dcrops->dop_ciphertext, dcrops->dop_plaintext, 858 dcrops->dop_templ, rhndl); 859 break; 860 861 default: 862 break; 863 } 864 break; 865 } 866 867 case KCF_OG_SIGN: { 868 kcf_sign_ops_params_t *sops = ¶ms->rp_u.sign_params; 869 870 switch (optype) { 871 case KCF_OP_INIT: 872 KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype, 873 pd, &sops->so_mech); 874 875 err = KCF_PROV_SIGN_INIT(pd, ctx, &sops->so_mech, 876 sops->so_key, sops->so_templ, rhndl); 877 break; 878 879 case KCF_OP_SIGN_RECOVER_INIT: 880 KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype, 881 pd, &sops->so_mech); 882 883 err = KCF_PROV_SIGN_RECOVER_INIT(pd, ctx, 884 &sops->so_mech, sops->so_key, sops->so_templ, 885 rhndl); 886 break; 887 888 case KCF_OP_SINGLE: 889 err = KCF_PROV_SIGN(pd, ctx, sops->so_data, 890 sops->so_signature, rhndl); 891 break; 892 893 case KCF_OP_SIGN_RECOVER: 894 err = KCF_PROV_SIGN_RECOVER(pd, ctx, 895 sops->so_data, sops->so_signature, rhndl); 896 break; 897 898 case KCF_OP_UPDATE: 899 err = KCF_PROV_SIGN_UPDATE(pd, ctx, sops->so_data, 900 rhndl); 901 break; 902 903 case KCF_OP_FINAL: 904 err = KCF_PROV_SIGN_FINAL(pd, ctx, sops->so_signature, 905 rhndl); 906 break; 907 908 case KCF_OP_ATOMIC: 909 ASSERT(ctx == NULL); 910 KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype, 911 pd, &sops->so_mech); 912 913 err = KCF_PROV_SIGN_ATOMIC(pd, sops->so_sid, 914 &sops->so_mech, sops->so_key, sops->so_data, 915 sops->so_templ, sops->so_signature, rhndl); 916 break; 917 918 case KCF_OP_SIGN_RECOVER_ATOMIC: 919 ASSERT(ctx == NULL); 920 KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype, 921 pd, &sops->so_mech); 922 923 err = KCF_PROV_SIGN_RECOVER_ATOMIC(pd, sops->so_sid, 924 
&sops->so_mech, sops->so_key, sops->so_data, 925 sops->so_templ, sops->so_signature, rhndl); 926 break; 927 928 default: 929 break; 930 } 931 break; 932 } 933 934 case KCF_OG_VERIFY: { 935 kcf_verify_ops_params_t *vops = ¶ms->rp_u.verify_params; 936 937 switch (optype) { 938 case KCF_OP_INIT: 939 KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype, 940 pd, &vops->vo_mech); 941 942 err = KCF_PROV_VERIFY_INIT(pd, ctx, &vops->vo_mech, 943 vops->vo_key, vops->vo_templ, rhndl); 944 break; 945 946 case KCF_OP_VERIFY_RECOVER_INIT: 947 KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype, 948 pd, &vops->vo_mech); 949 950 err = KCF_PROV_VERIFY_RECOVER_INIT(pd, ctx, 951 &vops->vo_mech, vops->vo_key, vops->vo_templ, 952 rhndl); 953 break; 954 955 case KCF_OP_SINGLE: 956 err = KCF_PROV_VERIFY(pd, ctx, vops->vo_data, 957 vops->vo_signature, rhndl); 958 break; 959 960 case KCF_OP_VERIFY_RECOVER: 961 err = KCF_PROV_VERIFY_RECOVER(pd, ctx, 962 vops->vo_signature, vops->vo_data, rhndl); 963 break; 964 965 case KCF_OP_UPDATE: 966 err = KCF_PROV_VERIFY_UPDATE(pd, ctx, vops->vo_data, 967 rhndl); 968 break; 969 970 case KCF_OP_FINAL: 971 err = KCF_PROV_VERIFY_FINAL(pd, ctx, vops->vo_signature, 972 rhndl); 973 break; 974 975 case KCF_OP_ATOMIC: 976 ASSERT(ctx == NULL); 977 KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype, 978 pd, &vops->vo_mech); 979 980 err = KCF_PROV_VERIFY_ATOMIC(pd, vops->vo_sid, 981 &vops->vo_mech, vops->vo_key, vops->vo_data, 982 vops->vo_templ, vops->vo_signature, rhndl); 983 break; 984 985 case KCF_OP_VERIFY_RECOVER_ATOMIC: 986 ASSERT(ctx == NULL); 987 KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype, 988 pd, &vops->vo_mech); 989 990 err = KCF_PROV_VERIFY_RECOVER_ATOMIC(pd, vops->vo_sid, 991 &vops->vo_mech, vops->vo_key, vops->vo_signature, 992 vops->vo_templ, vops->vo_data, rhndl); 993 break; 994 995 default: 996 break; 997 } 998 break; 999 } 1000 1001 case KCF_OG_ENCRYPT_MAC: { 1002 kcf_encrypt_mac_ops_params_t *eops = 1003 
¶ms->rp_u.encrypt_mac_params; 1004 kcf_context_t *kcf_secondctx; 1005 1006 switch (optype) { 1007 case KCF_OP_INIT: 1008 kcf_secondctx = ((kcf_context_t *) 1009 (ctx->cc_framework_private))->kc_secondctx; 1010 1011 if (kcf_secondctx != NULL) { 1012 err = kcf_emulate_dual(pd, ctx, params); 1013 break; 1014 } 1015 KCF_SET_PROVIDER_MECHNUM( 1016 eops->em_framework_encr_mechtype, 1017 pd, &eops->em_encr_mech); 1018 1019 KCF_SET_PROVIDER_MECHNUM( 1020 eops->em_framework_mac_mechtype, 1021 pd, &eops->em_mac_mech); 1022 1023 err = KCF_PROV_ENCRYPT_MAC_INIT(pd, ctx, 1024 &eops->em_encr_mech, eops->em_encr_key, 1025 &eops->em_mac_mech, eops->em_mac_key, 1026 eops->em_encr_templ, eops->em_mac_templ, 1027 rhndl); 1028 1029 break; 1030 1031 case KCF_OP_SINGLE: 1032 err = KCF_PROV_ENCRYPT_MAC(pd, ctx, 1033 eops->em_plaintext, eops->em_ciphertext, 1034 eops->em_mac, rhndl); 1035 break; 1036 1037 case KCF_OP_UPDATE: 1038 kcf_secondctx = ((kcf_context_t *) 1039 (ctx->cc_framework_private))->kc_secondctx; 1040 if (kcf_secondctx != NULL) { 1041 err = kcf_emulate_dual(pd, ctx, params); 1042 break; 1043 } 1044 err = KCF_PROV_ENCRYPT_MAC_UPDATE(pd, ctx, 1045 eops->em_plaintext, eops->em_ciphertext, rhndl); 1046 break; 1047 1048 case KCF_OP_FINAL: 1049 kcf_secondctx = ((kcf_context_t *) 1050 (ctx->cc_framework_private))->kc_secondctx; 1051 if (kcf_secondctx != NULL) { 1052 err = kcf_emulate_dual(pd, ctx, params); 1053 break; 1054 } 1055 err = KCF_PROV_ENCRYPT_MAC_FINAL(pd, ctx, 1056 eops->em_ciphertext, eops->em_mac, rhndl); 1057 break; 1058 1059 case KCF_OP_ATOMIC: 1060 ASSERT(ctx == NULL); 1061 1062 KCF_SET_PROVIDER_MECHNUM( 1063 eops->em_framework_encr_mechtype, 1064 pd, &eops->em_encr_mech); 1065 1066 KCF_SET_PROVIDER_MECHNUM( 1067 eops->em_framework_mac_mechtype, 1068 pd, &eops->em_mac_mech); 1069 1070 err = KCF_PROV_ENCRYPT_MAC_ATOMIC(pd, eops->em_sid, 1071 &eops->em_encr_mech, eops->em_encr_key, 1072 &eops->em_mac_mech, eops->em_mac_key, 1073 eops->em_plaintext, 
eops->em_ciphertext, 1074 eops->em_mac, 1075 eops->em_encr_templ, eops->em_mac_templ, 1076 rhndl); 1077 1078 break; 1079 1080 default: 1081 break; 1082 } 1083 break; 1084 } 1085 1086 case KCF_OG_MAC_DECRYPT: { 1087 kcf_mac_decrypt_ops_params_t *dops = 1088 ¶ms->rp_u.mac_decrypt_params; 1089 kcf_context_t *kcf_secondctx; 1090 1091 switch (optype) { 1092 case KCF_OP_INIT: 1093 kcf_secondctx = ((kcf_context_t *) 1094 (ctx->cc_framework_private))->kc_secondctx; 1095 1096 if (kcf_secondctx != NULL) { 1097 err = kcf_emulate_dual(pd, ctx, params); 1098 break; 1099 } 1100 KCF_SET_PROVIDER_MECHNUM( 1101 dops->md_framework_mac_mechtype, 1102 pd, &dops->md_mac_mech); 1103 1104 KCF_SET_PROVIDER_MECHNUM( 1105 dops->md_framework_decr_mechtype, 1106 pd, &dops->md_decr_mech); 1107 1108 err = KCF_PROV_MAC_DECRYPT_INIT(pd, ctx, 1109 &dops->md_mac_mech, dops->md_mac_key, 1110 &dops->md_decr_mech, dops->md_decr_key, 1111 dops->md_mac_templ, dops->md_decr_templ, 1112 rhndl); 1113 1114 break; 1115 1116 case KCF_OP_SINGLE: 1117 err = KCF_PROV_MAC_DECRYPT(pd, ctx, 1118 dops->md_ciphertext, dops->md_mac, 1119 dops->md_plaintext, rhndl); 1120 break; 1121 1122 case KCF_OP_UPDATE: 1123 kcf_secondctx = ((kcf_context_t *) 1124 (ctx->cc_framework_private))->kc_secondctx; 1125 if (kcf_secondctx != NULL) { 1126 err = kcf_emulate_dual(pd, ctx, params); 1127 break; 1128 } 1129 err = KCF_PROV_MAC_DECRYPT_UPDATE(pd, ctx, 1130 dops->md_ciphertext, dops->md_plaintext, rhndl); 1131 break; 1132 1133 case KCF_OP_FINAL: 1134 kcf_secondctx = ((kcf_context_t *) 1135 (ctx->cc_framework_private))->kc_secondctx; 1136 if (kcf_secondctx != NULL) { 1137 err = kcf_emulate_dual(pd, ctx, params); 1138 break; 1139 } 1140 err = KCF_PROV_MAC_DECRYPT_FINAL(pd, ctx, 1141 dops->md_mac, dops->md_plaintext, rhndl); 1142 break; 1143 1144 case KCF_OP_ATOMIC: 1145 ASSERT(ctx == NULL); 1146 1147 KCF_SET_PROVIDER_MECHNUM( 1148 dops->md_framework_mac_mechtype, 1149 pd, &dops->md_mac_mech); 1150 1151 KCF_SET_PROVIDER_MECHNUM( 1152 
dops->md_framework_decr_mechtype, 1153 pd, &dops->md_decr_mech); 1154 1155 err = KCF_PROV_MAC_DECRYPT_ATOMIC(pd, dops->md_sid, 1156 &dops->md_mac_mech, dops->md_mac_key, 1157 &dops->md_decr_mech, dops->md_decr_key, 1158 dops->md_ciphertext, dops->md_mac, 1159 dops->md_plaintext, 1160 dops->md_mac_templ, dops->md_decr_templ, 1161 rhndl); 1162 1163 break; 1164 1165 case KCF_OP_MAC_VERIFY_DECRYPT_ATOMIC: 1166 ASSERT(ctx == NULL); 1167 1168 KCF_SET_PROVIDER_MECHNUM( 1169 dops->md_framework_mac_mechtype, 1170 pd, &dops->md_mac_mech); 1171 1172 KCF_SET_PROVIDER_MECHNUM( 1173 dops->md_framework_decr_mechtype, 1174 pd, &dops->md_decr_mech); 1175 1176 err = KCF_PROV_MAC_VERIFY_DECRYPT_ATOMIC(pd, 1177 dops->md_sid, &dops->md_mac_mech, dops->md_mac_key, 1178 &dops->md_decr_mech, dops->md_decr_key, 1179 dops->md_ciphertext, dops->md_mac, 1180 dops->md_plaintext, 1181 dops->md_mac_templ, dops->md_decr_templ, 1182 rhndl); 1183 1184 break; 1185 1186 default: 1187 break; 1188 } 1189 break; 1190 } 1191 1192 case KCF_OG_KEY: { 1193 kcf_key_ops_params_t *kops = ¶ms->rp_u.key_params; 1194 1195 ASSERT(ctx == NULL); 1196 KCF_SET_PROVIDER_MECHNUM(kops->ko_framework_mechtype, pd, 1197 &kops->ko_mech); 1198 1199 switch (optype) { 1200 case KCF_OP_KEY_GENERATE: 1201 err = KCF_PROV_KEY_GENERATE(pd, kops->ko_sid, 1202 &kops->ko_mech, 1203 kops->ko_key_template, kops->ko_key_attribute_count, 1204 kops->ko_key_object_id_ptr, rhndl); 1205 break; 1206 1207 case KCF_OP_KEY_GENERATE_PAIR: 1208 err = KCF_PROV_KEY_GENERATE_PAIR(pd, kops->ko_sid, 1209 &kops->ko_mech, 1210 kops->ko_key_template, kops->ko_key_attribute_count, 1211 kops->ko_private_key_template, 1212 kops->ko_private_key_attribute_count, 1213 kops->ko_key_object_id_ptr, 1214 kops->ko_private_key_object_id_ptr, rhndl); 1215 break; 1216 1217 case KCF_OP_KEY_WRAP: 1218 err = KCF_PROV_KEY_WRAP(pd, kops->ko_sid, 1219 &kops->ko_mech, 1220 kops->ko_key, kops->ko_key_object_id_ptr, 1221 kops->ko_wrapped_key, kops->ko_wrapped_key_len_ptr, 1222 
rhndl); 1223 break; 1224 1225 case KCF_OP_KEY_UNWRAP: 1226 err = KCF_PROV_KEY_UNWRAP(pd, kops->ko_sid, 1227 &kops->ko_mech, 1228 kops->ko_key, kops->ko_wrapped_key, 1229 kops->ko_wrapped_key_len_ptr, 1230 kops->ko_key_template, kops->ko_key_attribute_count, 1231 kops->ko_key_object_id_ptr, rhndl); 1232 break; 1233 1234 case KCF_OP_KEY_DERIVE: 1235 err = KCF_PROV_KEY_DERIVE(pd, kops->ko_sid, 1236 &kops->ko_mech, 1237 kops->ko_key, kops->ko_key_template, 1238 kops->ko_key_attribute_count, 1239 kops->ko_key_object_id_ptr, rhndl); 1240 break; 1241 1242 default: 1243 break; 1244 } 1245 break; 1246 } 1247 1248 case KCF_OG_RANDOM: { 1249 kcf_random_number_ops_params_t *rops = 1250 ¶ms->rp_u.random_number_params; 1251 1252 ASSERT(ctx == NULL); 1253 1254 switch (optype) { 1255 case KCF_OP_RANDOM_SEED: 1256 err = KCF_PROV_SEED_RANDOM(pd, rops->rn_sid, 1257 rops->rn_buf, rops->rn_buflen, rops->rn_entropy_est, 1258 rops->rn_flags, rhndl); 1259 break; 1260 1261 case KCF_OP_RANDOM_GENERATE: 1262 err = KCF_PROV_GENERATE_RANDOM(pd, rops->rn_sid, 1263 rops->rn_buf, rops->rn_buflen, rhndl); 1264 break; 1265 1266 default: 1267 break; 1268 } 1269 break; 1270 } 1271 1272 case KCF_OG_SESSION: { 1273 kcf_session_ops_params_t *sops = ¶ms->rp_u.session_params; 1274 1275 ASSERT(ctx == NULL); 1276 switch (optype) { 1277 case KCF_OP_SESSION_OPEN: 1278 /* 1279 * so_pd may be a logical provider, in which case 1280 * we need to check whether it has been removed. 1281 */ 1282 if (KCF_IS_PROV_REMOVED(sops->so_pd)) { 1283 err = CRYPTO_DEVICE_ERROR; 1284 break; 1285 } 1286 err = KCF_PROV_SESSION_OPEN(pd, sops->so_sid_ptr, 1287 rhndl, sops->so_pd); 1288 break; 1289 1290 case KCF_OP_SESSION_CLOSE: 1291 /* 1292 * so_pd may be a logical provider, in which case 1293 * we need to check whether it has been removed. 
1294 */ 1295 if (KCF_IS_PROV_REMOVED(sops->so_pd)) { 1296 err = CRYPTO_DEVICE_ERROR; 1297 break; 1298 } 1299 err = KCF_PROV_SESSION_CLOSE(pd, sops->so_sid, 1300 rhndl, sops->so_pd); 1301 break; 1302 1303 case KCF_OP_SESSION_LOGIN: 1304 err = KCF_PROV_SESSION_LOGIN(pd, sops->so_sid, 1305 sops->so_user_type, sops->so_pin, 1306 sops->so_pin_len, rhndl); 1307 break; 1308 1309 case KCF_OP_SESSION_LOGOUT: 1310 err = KCF_PROV_SESSION_LOGOUT(pd, sops->so_sid, rhndl); 1311 break; 1312 1313 default: 1314 break; 1315 } 1316 break; 1317 } 1318 1319 case KCF_OG_OBJECT: { 1320 kcf_object_ops_params_t *jops = ¶ms->rp_u.object_params; 1321 1322 ASSERT(ctx == NULL); 1323 switch (optype) { 1324 case KCF_OP_OBJECT_CREATE: 1325 err = KCF_PROV_OBJECT_CREATE(pd, jops->oo_sid, 1326 jops->oo_template, jops->oo_attribute_count, 1327 jops->oo_object_id_ptr, rhndl); 1328 break; 1329 1330 case KCF_OP_OBJECT_COPY: 1331 err = KCF_PROV_OBJECT_COPY(pd, jops->oo_sid, 1332 jops->oo_object_id, 1333 jops->oo_template, jops->oo_attribute_count, 1334 jops->oo_object_id_ptr, rhndl); 1335 break; 1336 1337 case KCF_OP_OBJECT_DESTROY: 1338 err = KCF_PROV_OBJECT_DESTROY(pd, jops->oo_sid, 1339 jops->oo_object_id, rhndl); 1340 break; 1341 1342 case KCF_OP_OBJECT_GET_SIZE: 1343 err = KCF_PROV_OBJECT_GET_SIZE(pd, jops->oo_sid, 1344 jops->oo_object_id, jops->oo_object_size, rhndl); 1345 break; 1346 1347 case KCF_OP_OBJECT_GET_ATTRIBUTE_VALUE: 1348 err = KCF_PROV_OBJECT_GET_ATTRIBUTE_VALUE(pd, 1349 jops->oo_sid, jops->oo_object_id, 1350 jops->oo_template, jops->oo_attribute_count, rhndl); 1351 break; 1352 1353 case KCF_OP_OBJECT_SET_ATTRIBUTE_VALUE: 1354 err = KCF_PROV_OBJECT_SET_ATTRIBUTE_VALUE(pd, 1355 jops->oo_sid, jops->oo_object_id, 1356 jops->oo_template, jops->oo_attribute_count, rhndl); 1357 break; 1358 1359 case KCF_OP_OBJECT_FIND_INIT: 1360 err = KCF_PROV_OBJECT_FIND_INIT(pd, jops->oo_sid, 1361 jops->oo_template, jops->oo_attribute_count, 1362 jops->oo_find_init_pp_ptr, rhndl); 1363 break; 1364 1365 
case KCF_OP_OBJECT_FIND: 1366 err = KCF_PROV_OBJECT_FIND(pd, jops->oo_find_pp, 1367 jops->oo_object_id_ptr, jops->oo_max_object_count, 1368 jops->oo_object_count_ptr, rhndl); 1369 break; 1370 1371 case KCF_OP_OBJECT_FIND_FINAL: 1372 err = KCF_PROV_OBJECT_FIND_FINAL(pd, jops->oo_find_pp, 1373 rhndl); 1374 break; 1375 1376 default: 1377 break; 1378 } 1379 break; 1380 } 1381 1382 case KCF_OG_PROVMGMT: { 1383 kcf_provmgmt_ops_params_t *pops = ¶ms->rp_u.provmgmt_params; 1384 1385 ASSERT(ctx == NULL); 1386 switch (optype) { 1387 case KCF_OP_MGMT_EXTINFO: 1388 /* 1389 * po_pd may be a logical provider, in which case 1390 * we need to check whether it has been removed. 1391 */ 1392 if (KCF_IS_PROV_REMOVED(pops->po_pd)) { 1393 err = CRYPTO_DEVICE_ERROR; 1394 break; 1395 } 1396 err = KCF_PROV_EXT_INFO(pd, pops->po_ext_info, rhndl, 1397 pops->po_pd); 1398 break; 1399 1400 case KCF_OP_MGMT_INITTOKEN: 1401 err = KCF_PROV_INIT_TOKEN(pd, pops->po_pin, 1402 pops->po_pin_len, pops->po_label, rhndl); 1403 break; 1404 1405 case KCF_OP_MGMT_INITPIN: 1406 err = KCF_PROV_INIT_PIN(pd, pops->po_sid, pops->po_pin, 1407 pops->po_pin_len, rhndl); 1408 break; 1409 1410 case KCF_OP_MGMT_SETPIN: 1411 err = KCF_PROV_SET_PIN(pd, pops->po_sid, 1412 pops->po_old_pin, pops->po_old_pin_len, 1413 pops->po_pin, pops->po_pin_len, rhndl); 1414 break; 1415 1416 default: 1417 break; 1418 } 1419 break; 1420 } 1421 1422 case KCF_OG_NOSTORE_KEY: { 1423 kcf_key_ops_params_t *kops = ¶ms->rp_u.key_params; 1424 1425 ASSERT(ctx == NULL); 1426 KCF_SET_PROVIDER_MECHNUM(kops->ko_framework_mechtype, pd, 1427 &kops->ko_mech); 1428 1429 switch (optype) { 1430 case KCF_OP_KEY_GENERATE: 1431 err = KCF_PROV_NOSTORE_KEY_GENERATE(pd, kops->ko_sid, 1432 &kops->ko_mech, kops->ko_key_template, 1433 kops->ko_key_attribute_count, 1434 kops->ko_out_template1, 1435 kops->ko_out_attribute_count1, rhndl); 1436 break; 1437 1438 case KCF_OP_KEY_GENERATE_PAIR: 1439 err = KCF_PROV_NOSTORE_KEY_GENERATE_PAIR(pd, 1440 kops->ko_sid, 
&kops->ko_mech, 1441 kops->ko_key_template, kops->ko_key_attribute_count, 1442 kops->ko_private_key_template, 1443 kops->ko_private_key_attribute_count, 1444 kops->ko_out_template1, 1445 kops->ko_out_attribute_count1, 1446 kops->ko_out_template2, 1447 kops->ko_out_attribute_count2, 1448 rhndl); 1449 break; 1450 1451 case KCF_OP_KEY_DERIVE: 1452 err = KCF_PROV_NOSTORE_KEY_DERIVE(pd, kops->ko_sid, 1453 &kops->ko_mech, kops->ko_key, 1454 kops->ko_key_template, 1455 kops->ko_key_attribute_count, 1456 kops->ko_out_template1, 1457 kops->ko_out_attribute_count1, rhndl); 1458 break; 1459 1460 default: 1461 break; 1462 } 1463 break; 1464 } 1465 default: 1466 break; 1467 } /* end of switch(params->rp_opgrp) */ 1468 1469 KCF_PROV_INCRSTATS(pd, err); 1470 return (err); 1471 } 1472 1473 /* 1474 * Emulate the call for a multipart dual ops with 2 single steps. 1475 * This routine is always called in the context of a working thread 1476 * running kcf_svc_do_run(). 1477 * The single steps are submitted in a pure synchronous way (blocking). 1478 * When this routine returns, kcf_svc_do_run() will call kcf_aop_done() 1479 * so the originating consumer's callback gets invoked. kcf_aop_done() 1480 * takes care of freeing the operation context. So, this routine does 1481 * not free the operation context. 1482 * 1483 * The provider descriptor is assumed held by the callers. 
 */
/*
 * Run a dual (encrypt+MAC or MAC+decrypt) multi-part step as two single
 * framework operations: the cipher half goes synchronously to 'pd', the
 * MAC half is driven through a second framework context that is stashed
 * in the primary context's kc_secondctx.  'err' starts out as
 * CRYPTO_ARGUMENTS_BAD so any op group/type not handled below reports
 * that error.
 */
static int
kcf_emulate_dual(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
    kcf_req_params_t *params)
{
	int err = CRYPTO_ARGUMENTS_BAD;
	kcf_op_type_t optype;
	size_t save_len;	/* scratch for the dd_len1 swap below */
	off_t save_offset;	/* scratch for the dd_offset1 swap below */

	optype = params->rp_optype;

	switch (params->rp_opgrp) {
	case KCF_OG_ENCRYPT_MAC: {
		kcf_encrypt_mac_ops_params_t *cmops =
		    &params->rp_u.encrypt_mac_params;
		kcf_context_t *encr_kcf_ctx;
		crypto_ctx_t *mac_ctx;
		kcf_req_params_t encr_params;

		encr_kcf_ctx = (kcf_context_t *)(ctx->cc_framework_private);

		switch (optype) {
		case KCF_OP_INIT: {
			/*
			 * Initialize the encrypt side on 'pd' first; only if
			 * that succeeds do we create the secondary MAC
			 * context and link it via kc_secondctx.
			 */
			encr_kcf_ctx->kc_secondctx = NULL;

			KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_INIT,
			    pd->pd_sid, &cmops->em_encr_mech,
			    cmops->em_encr_key, NULL, NULL,
			    cmops->em_encr_templ);

			err = kcf_submit_request(pd, ctx, NULL, &encr_params,
			    B_FALSE);

			/* It can't be CRYPTO_QUEUED */
			if (err != CRYPTO_SUCCESS) {
				break;
			}

			err = crypto_mac_init(&cmops->em_mac_mech,
			    cmops->em_mac_key, cmops->em_mac_templ,
			    (crypto_context_t *)&mac_ctx, NULL);

			if (err == CRYPTO_SUCCESS) {
				/*
				 * Remember the MAC context and hold a
				 * reference on it so the later UPDATE/FINAL
				 * steps can find and use it.
				 */
				encr_kcf_ctx->kc_secondctx = (kcf_context_t *)
				    mac_ctx->cc_framework_private;
				KCF_CONTEXT_REFHOLD((kcf_context_t *)
				    mac_ctx->cc_framework_private);
			}

			break;

		}
		case KCF_OP_UPDATE: {
			crypto_dual_data_t *ct = cmops->em_ciphertext;
			crypto_data_t *pt = cmops->em_plaintext;
			kcf_context_t *mac_kcf_ctx = encr_kcf_ctx->kc_secondctx;
			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;

			/* Encrypt this chunk first, synchronously on 'pd'. */
			KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_UPDATE,
			    pd->pd_sid, NULL, NULL, pt, (crypto_data_t *)ct,
			    NULL);

			err = kcf_submit_request(pd, ctx, NULL, &encr_params,
			    B_FALSE);

			/* It can't be CRYPTO_QUEUED */
			if (err != CRYPTO_SUCCESS) {
				break;
			}

			/*
			 * MAC the freshly produced ciphertext.  When the
			 * dual data's second region (dd_offset2/dd_len2) is
			 * in use, temporarily alias region 1 to it so
			 * crypto_mac_update() digests the new output, then
			 * restore region 1 afterwards.
			 */
			save_offset = ct->dd_offset1;
			save_len = ct->dd_len1;
			if (ct->dd_len2 == 0) {
				/*
				 * The previous encrypt step was an
				 * accumulation only and didn't produce any
				 * partial output
				 */
				if (ct->dd_len1 == 0)
					break;

			} else {
				ct->dd_offset1 = ct->dd_offset2;
				ct->dd_len1 = ct->dd_len2;
			}
			err = crypto_mac_update((crypto_context_t)mac_ctx,
			    (crypto_data_t *)ct, NULL);

			ct->dd_offset1 = save_offset;
			ct->dd_len1 = save_len;

			break;
		}
		case KCF_OP_FINAL: {
			crypto_dual_data_t *ct = cmops->em_ciphertext;
			crypto_data_t *mac = cmops->em_mac;
			kcf_context_t *mac_kcf_ctx = encr_kcf_ctx->kc_secondctx;
			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
			crypto_context_t mac_context = mac_ctx;

			KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_FINAL,
			    pd->pd_sid, NULL, NULL, NULL, (crypto_data_t *)ct,
			    NULL);

			err = kcf_submit_request(pd, ctx, NULL, &encr_params,
			    B_FALSE);

			/* It can't be CRYPTO_QUEUED */
			if (err != CRYPTO_SUCCESS) {
				/* Don't leak the secondary MAC context. */
				crypto_cancel_ctx(mac_context);
				break;
			}

			/*
			 * MAC any final ciphertext the encrypt step left in
			 * region 2, using the same region-1 aliasing trick
			 * as in KCF_OP_UPDATE.
			 */
			if (ct->dd_len2 > 0) {
				save_offset = ct->dd_offset1;
				save_len = ct->dd_len1;
				ct->dd_offset1 = ct->dd_offset2;
				ct->dd_len1 = ct->dd_len2;

				err = crypto_mac_update(mac_context,
				    (crypto_data_t *)ct, NULL);

				ct->dd_offset1 = save_offset;
				ct->dd_len1 = save_len;

				if (err != CRYPTO_SUCCESS) {
					/*
					 * Early return: region 1 is already
					 * restored and the MAC context is
					 * cancelled here, so skipping the
					 * KCF_PROV_INCRSTATS() below is the
					 * only difference from 'break'.
					 */
					crypto_cancel_ctx(mac_context);
					return (err);
				}
			}

			/* and finally, collect the MAC */
			err = crypto_mac_final(mac_context, mac, NULL);
			break;
		}

		default:
			/* Unhandled optype: err stays CRYPTO_ARGUMENTS_BAD. */
			break;
		}
		KCF_PROV_INCRSTATS(pd, err);
		break;
	}
	case KCF_OG_MAC_DECRYPT: {
		kcf_mac_decrypt_ops_params_t *mdops =
		    &params->rp_u.mac_decrypt_params;
		kcf_context_t *decr_kcf_ctx;
		crypto_ctx_t *mac_ctx;
		kcf_req_params_t decr_params;

		decr_kcf_ctx = (kcf_context_t *)(ctx->cc_framework_private);

		switch (optype) {
		case KCF_OP_INIT: {
			/*
			 * Mirror image of ENCRYPT_MAC init: create the MAC
			 * context first, then initialize the decrypt side on
			 * 'pd'; cancel the MAC context if the latter fails.
			 */
			decr_kcf_ctx->kc_secondctx = NULL;

			err = crypto_mac_init(&mdops->md_mac_mech,
			    mdops->md_mac_key, mdops->md_mac_templ,
			    (crypto_context_t *)&mac_ctx, NULL);

			/* It can't be CRYPTO_QUEUED */
			if (err != CRYPTO_SUCCESS) {
				break;
			}

			KCF_WRAP_DECRYPT_OPS_PARAMS(&decr_params, KCF_OP_INIT,
			    pd->pd_sid, &mdops->md_decr_mech,
			    mdops->md_decr_key, NULL, NULL,
			    mdops->md_decr_templ);

			err = kcf_submit_request(pd, ctx, NULL, &decr_params,
			    B_FALSE);

			/* It can't be CRYPTO_QUEUED */
			if (err != CRYPTO_SUCCESS) {
				crypto_cancel_ctx((crypto_context_t)mac_ctx);
				break;
			}

			/* Link and hold the MAC context for UPDATE/FINAL. */
			decr_kcf_ctx->kc_secondctx = (kcf_context_t *)
			    mac_ctx->cc_framework_private;
			KCF_CONTEXT_REFHOLD((kcf_context_t *)
			    mac_ctx->cc_framework_private);

			break;

		}
		case KCF_OP_UPDATE: {
			crypto_dual_data_t *ct = mdops->md_ciphertext;
			crypto_data_t *pt = mdops->md_plaintext;
			kcf_context_t *mac_kcf_ctx = decr_kcf_ctx->kc_secondctx;
			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;

			/* MAC the incoming ciphertext before decrypting. */
			err = crypto_mac_update((crypto_context_t)mac_ctx,
			    (crypto_data_t *)ct, NULL);

			if (err != CRYPTO_SUCCESS)
				break;

			save_offset = ct->dd_offset1;
			save_len = ct->dd_len1;

			/* zero ct->dd_len2 means decrypt everything */
			if (ct->dd_len2 > 0) {
				/*
				 * Alias region 1 to region 2 so only that
				 * portion is decrypted; restored below.
				 */
				ct->dd_offset1 = ct->dd_offset2;
				ct->dd_len1 = ct->dd_len2;
			}

			err = crypto_decrypt_update((crypto_context_t)ctx,
			    (crypto_data_t *)ct, pt, NULL);

			ct->dd_offset1 = save_offset;
			ct->dd_len1 = save_len;

			break;
		}
		case KCF_OP_FINAL: {
			crypto_data_t *pt = mdops->md_plaintext;
			crypto_data_t *mac = mdops->md_mac;
			kcf_context_t *mac_kcf_ctx = decr_kcf_ctx->kc_secondctx;
			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;

			/*
			 * Collect the MAC first; if that fails, cancel the
			 * decrypt context since no final decrypt will run.
			 */
			err = crypto_mac_final((crypto_context_t)mac_ctx,
			    mac, NULL);

			if (err != CRYPTO_SUCCESS) {
				crypto_cancel_ctx(ctx);
				break;
			}

			/* Get the last chunk of plaintext */
			/*
			 * NOTE(review): the extra hold here presumably
			 * balances a reference that crypto_decrypt_final()
			 * releases on the context — confirm against the
			 * crypto_decrypt_final() implementation.
			 */
			KCF_CONTEXT_REFHOLD(decr_kcf_ctx);
			err = crypto_decrypt_final((crypto_context_t)ctx, pt,
			    NULL);

			break;
		}
		/*
		 * NOTE(review): no default case here (unlike the
		 * ENCRYPT_MAC switch above); an unhandled optype leaves
		 * err = CRYPTO_ARGUMENTS_BAD, which is returned below.
		 */
		}
		break;
	}
	default:

		break;
	}	/* end of switch(params->rp_opgrp) */

	return (err);
}