1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2006 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 #pragma ident "%Z%%M% %I% %E% SMI" 27 28 /* 29 * This file contains routines which call into a provider's 30 * entry points and do other related work. 31 */ 32 33 #include <sys/types.h> 34 #include <sys/systm.h> 35 #include <sys/taskq_impl.h> 36 #include <sys/cmn_err.h> 37 38 #include <sys/crypto/common.h> 39 #include <sys/crypto/impl.h> 40 #include <sys/crypto/sched_impl.h> 41 42 /* 43 * Return B_TRUE if the specified entry point is NULL. We rely on the 44 * caller to provide, with offset_1 and offset_2, information to calculate 45 * the location of the entry point. The ops argument is a temporary local 46 * variable defined as caddr_t *. 
47 */ 48 #define KCF_PROV_NULL_ENTRY_POINT(pd, o1, o2, ops) \ 49 (ops = (caddr_t *)((caddr_t)(pd)->pd_ops_vector + (o1)), \ 50 (*ops == NULL || *(caddr_t *)((caddr_t)(*ops) + (o2)) == NULL)) 51 52 53 static int kcf_emulate_dual(kcf_provider_desc_t *, crypto_ctx_t *, 54 kcf_req_params_t *); 55 void 56 kcf_free_triedlist(kcf_prov_tried_t *list) 57 { 58 kcf_prov_tried_t *l; 59 60 while ((l = list) != NULL) { 61 list = list->pt_next; 62 KCF_PROV_REFRELE(l->pt_pd); 63 kmem_free(l, sizeof (kcf_prov_tried_t)); 64 } 65 } 66 67 kcf_prov_tried_t * 68 kcf_insert_triedlist(kcf_prov_tried_t **list, kcf_provider_desc_t *pd, 69 int kmflag) 70 { 71 kcf_prov_tried_t *l; 72 73 l = kmem_alloc(sizeof (kcf_prov_tried_t), kmflag); 74 if (l == NULL) 75 return (NULL); 76 77 l->pt_pd = pd; 78 l->pt_next = *list; 79 *list = l; 80 81 return (l); 82 } 83 84 static boolean_t 85 is_in_triedlist(kcf_provider_desc_t *pd, kcf_prov_tried_t *triedl) 86 { 87 while (triedl != NULL) { 88 if (triedl->pt_pd == pd) 89 return (B_TRUE); 90 triedl = triedl->pt_next; 91 }; 92 93 return (B_FALSE); 94 } 95 96 /* 97 * Search a mech entry's hardware provider list for the specified 98 * provider. Return true if found. 99 */ 100 static boolean_t 101 is_valid_provider_for_mech(kcf_provider_desc_t *pd, kcf_mech_entry_t *me, 102 crypto_func_group_t fg) 103 { 104 kcf_prov_mech_desc_t *prov_chain; 105 106 prov_chain = me->me_hw_prov_chain; 107 if (prov_chain != NULL) { 108 ASSERT(me->me_num_hwprov > 0); 109 for (; prov_chain != NULL; prov_chain = prov_chain->pm_next) { 110 if (prov_chain->pm_prov_desc == pd && 111 IS_FG_SUPPORTED(prov_chain, fg)) { 112 return (B_TRUE); 113 } 114 } 115 } 116 return (B_FALSE); 117 } 118 119 /* 120 * This routine, given a logical provider, returns the least loaded 121 * provider belonging to the logical provider. The provider must be 122 * able to do the specified mechanism, i.e. check that the mechanism 123 * hasn't been disabled. 
 * If the mech_type_2 argument is not CRYPTO_MECH_INVALID, the chosen
 * member must support that second mechanism as well. On success, a
 * reference is held on the provider returned through *new; the return
 * value is CRYPTO_SUCCESS, or an error code when no member is usable.
 */
int
kcf_get_hardware_provider(crypto_mech_type_t mech_type_1,
    crypto_mech_type_t mech_type_2, boolean_t call_restrict,
    kcf_provider_desc_t *old, kcf_provider_desc_t **new, crypto_func_group_t fg)
{
	kcf_provider_desc_t *provider, *real_pd = old;
	kcf_provider_desc_t *gpd = NULL;	/* good provider */
	kcf_provider_desc_t *bpd = NULL;	/* busy provider */
	kcf_provider_list_t *p;
	kcf_ops_class_t class;
	kcf_mech_entry_t *me;
	kcf_mech_entry_tab_t *me_tab;
	int index, len, gqlen = INT_MAX, rv = CRYPTO_SUCCESS;

	/* get the mech entry for the specified mechanism */
	class = KCF_MECH2CLASS(mech_type_1);
	if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
		return (CRYPTO_MECHANISM_INVALID);
	}

	me_tab = &kcf_mech_tabs_tab[class];
	index = KCF_MECH2INDEX(mech_type_1);
	if ((index < 0) || (index >= me_tab->met_size)) {
		return (CRYPTO_MECHANISM_INVALID);
	}

	me = &((me_tab->met_tab)[index]);
	mutex_enter(&me->me_mutex);

	/*
	 * We assume the provider descriptor will not go away because
	 * it is being held somewhere, i.e. its reference count has been
	 * incremented. In the case of the crypto module, the provider
	 * descriptor is held by the session structure.
	 */
	if (old->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		/* A logical provider with no members cannot do anything. */
		if (old->pd_provider_list == NULL) {
			real_pd = NULL;
			rv = CRYPTO_DEVICE_ERROR;
			goto out;
		}
		/*
		 * Find the least loaded real provider. tq_nalloc gives
		 * the number of task entries in the task queue. We do
		 * not acquire tq_lock here as it is not critical to
		 * get the exact number and the lock contention may be
		 * too costly for this code path.
		 */
		mutex_enter(&old->pd_lock);
		p = old->pd_provider_list;
		while (p != NULL) {
			provider = p->pl_provider;

			ASSERT(provider->pd_prov_type !=
			    CRYPTO_LOGICAL_PROVIDER);

			/* skip restricted providers for restricted callers */
			if (call_restrict && provider->pd_restricted) {
				p = p->pl_next;
				continue;
			}

			/* member must do mech_type_1 with function group fg */
			if (!is_valid_provider_for_mech(provider, me, fg)) {
				p = p->pl_next;
				continue;
			}

			/* provider does second mech */
			if (mech_type_2 != CRYPTO_MECH_INVALID) {
				crypto_mech_type_t mech_type;
				int i;

				/* convert from kef to provider's number */
				mech_type = provider->pd_map_mechnums
				    [KCF_MECH2CLASS(mech_type_2)]
				    [KCF_MECH2INDEX(mech_type_2)];

				/* linear scan of the provider's mech list */
				for (i = 0; i < provider->pd_mech_list_count;
				    i++) {
					if (provider->pd_mechanisms[i]
					    .cm_mech_number == mech_type)
						break;
				}
				if (i == provider->pd_mech_list_count) {
					p = p->pl_next;
					continue;
				}
			}

			if (provider->pd_state != KCF_PROV_READY) {
				/* choose BUSY if no READY providers */
				if (provider->pd_state == KCF_PROV_BUSY)
					bpd = provider;
				p = p->pl_next;
				continue;
			}

			/* track the READY member with the shortest queue */
			len = provider->pd_sched_info.ks_taskq->tq_nalloc;
			if (len < gqlen) {
				gqlen = len;
				gpd = provider;
			}

			p = p->pl_next;
		}

		/* prefer a READY provider; fall back to a BUSY one */
		if (gpd != NULL) {
			real_pd = gpd;
			KCF_PROV_REFHOLD(real_pd);
		} else if (bpd != NULL) {
			real_pd = bpd;
			KCF_PROV_REFHOLD(real_pd);
		} else {
			/* can't find provider */
			real_pd = NULL;
			rv = CRYPTO_MECHANISM_INVALID;
		}
		mutex_exit(&old->pd_lock);

	} else {
		/* old is a real provider; just validate it. */
		if (!KCF_IS_PROV_USABLE(old) ||
		    (call_restrict && old->pd_restricted)) {
			real_pd = NULL;
			rv = CRYPTO_DEVICE_ERROR;
			goto out;
		}

		if (!is_valid_provider_for_mech(old, me, fg)) {
			real_pd = NULL;
			rv = CRYPTO_MECHANISM_INVALID;
			goto out;
		}

		/* real_pd is still old here; hold it for the caller. */
		KCF_PROV_REFHOLD(real_pd);
	}
out:
	mutex_exit(&me->me_mutex);
	*new = real_pd;
	return (rv);
}

/*
 * This routine, given a logical provider, returns the least loaded
 * provider belonging to the logical provider. Just in case providers
 * are not entirely equivalent, the provider's entry point is checked
 * for non-nullness. This is accomplished by having the caller pass, as
 * arguments, the offset of the function group (offset_1), and the
 * offset of the function within the function group (offset_2).
 * On success a reference is held on the provider returned through
 * *new; the return value conveys the error code.
 */
int
kcf_get_hardware_provider_nomech(offset_t offset_1, offset_t offset_2,
    boolean_t call_restrict, kcf_provider_desc_t *old,
    kcf_provider_desc_t **new)
{
	kcf_provider_desc_t *provider, *real_pd = old;
	kcf_provider_desc_t *gpd = NULL;	/* good provider */
	kcf_provider_desc_t *bpd = NULL;	/* busy provider */
	kcf_provider_list_t *p;
	caddr_t *ops;	/* scratch for KCF_PROV_NULL_ENTRY_POINT */
	int len, gqlen = INT_MAX, rv = CRYPTO_SUCCESS;

	/*
	 * We assume the provider descriptor will not go away because
	 * it is being held somewhere, i.e. its reference count has been
	 * incremented. In the case of the crypto module, the provider
	 * descriptor is held by the session structure.
	 */
	if (old->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		if (old->pd_provider_list == NULL) {
			real_pd = NULL;
			rv = CRYPTO_DEVICE_ERROR;
			goto out;
		}
		/*
		 * Find the least loaded real provider. tq_nalloc gives
		 * the number of task entries in the task queue. We do
		 * not acquire tq_lock here as it is not critical to
		 * get the exact number and the lock contention may be
		 * too costly for this code path.
		 */
		mutex_enter(&old->pd_lock);
		p = old->pd_provider_list;
		while (p != NULL) {
			provider = p->pl_provider;

			ASSERT(provider->pd_prov_type !=
			    CRYPTO_LOGICAL_PROVIDER);

			if (call_restrict && provider->pd_restricted) {
				p = p->pl_next;
				continue;
			}
			/* member must implement the requested entry point */
			if (KCF_PROV_NULL_ENTRY_POINT(provider, offset_1,
			    offset_2, ops)) {
				p = p->pl_next;
				continue;
			}

			if (provider->pd_state != KCF_PROV_READY) {
				/* choose BUSY if no READY providers */
				if (provider->pd_state == KCF_PROV_BUSY)
					bpd = provider;
				p = p->pl_next;
				continue;
			}

			len = provider->pd_sched_info.ks_taskq->tq_nalloc;
			if (len < gqlen) {
				gqlen = len;
				gpd = provider;
			}

			p = p->pl_next;
		}
		mutex_exit(&old->pd_lock);

		/*
		 * NOTE(review): unlike kcf_get_hardware_provider(), the
		 * REFHOLD below is taken after pd_lock is dropped —
		 * confirm the member cannot be torn down in between.
		 */
		if (gpd != NULL) {
			real_pd = gpd;
			KCF_PROV_REFHOLD(real_pd);
		} else if (bpd != NULL) {
			real_pd = bpd;
			KCF_PROV_REFHOLD(real_pd);
		} else {
			/* can't find provider */
			real_pd = NULL;
			rv = CRYPTO_DEVICE_ERROR;
		}

	} else {
		if (!KCF_IS_PROV_USABLE(old) ||
		    (call_restrict && old->pd_restricted)) {
			real_pd = NULL;
			rv = CRYPTO_DEVICE_ERROR;
			goto out;
		}

		if (KCF_PROV_NULL_ENTRY_POINT(old, offset_1, offset_2, ops)) {
			real_pd = NULL;
			rv = CRYPTO_NOT_SUPPORTED;
			goto out;
		}
		KCF_PROV_REFHOLD(real_pd);
	}
out:
	*new = real_pd;
	return (rv);
}

/*
 * Return the next member of a logical provider, given the previous
 * member. The function returns true if the next member is found and
 * bumps its refcnt before returning.
382 */ 383 boolean_t 384 kcf_get_next_logical_provider_member(kcf_provider_desc_t *logical_provider, 385 kcf_provider_desc_t *prev, kcf_provider_desc_t **pd) 386 { 387 kcf_provider_list_t *p; 388 kcf_provider_desc_t *next; 389 390 ASSERT(MUTEX_HELD(&logical_provider->pd_lock)); 391 p = logical_provider->pd_provider_list; 392 while (p != NULL) { 393 /* start the search */ 394 if (prev == NULL) { 395 next = p->pl_provider; 396 goto found; 397 } else { 398 /* find where we were before */ 399 if (p->pl_provider == prev) { 400 if (p->pl_next != NULL) { 401 next = p->pl_next->pl_provider; 402 goto found; 403 } 404 } 405 } 406 p = p->pl_next; 407 } 408 return (B_FALSE); 409 410 found: 411 KCF_PROV_REFHOLD(next); 412 *pd = next; 413 return (B_TRUE); 414 } 415 416 /* 417 * Return the best provider for the specified mechanism. The provider 418 * is held and it is the caller's responsibility to release it when done. 419 * The fg input argument is used as a search criterion to pick a provider. 420 * A provider has to support this function group to be picked. 421 * 422 * Find the least loaded provider in the list of providers. We do a linear 423 * search to find one. This is fine as we assume there are only a few 424 * number of providers in this list. If this assumption ever changes, 425 * we should revisit this. 426 * 427 * call_restrict represents if the caller should not be allowed to 428 * use restricted providers. 
 */
kcf_provider_desc_t *
kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp,
    int *error, kcf_prov_tried_t *triedl, crypto_func_group_t fg,
    boolean_t call_restrict, size_t data_size)
{
	kcf_provider_desc_t *pd = NULL, *gpd = NULL;
	kcf_prov_mech_desc_t *prov_chain, *mdesc;
	int len, gqlen = INT_MAX;
	kcf_ops_class_t class;
	int index;
	kcf_mech_entry_t *me;
	kcf_mech_entry_tab_t *me_tab;

	/* map the mechanism to its mech table entry */
	class = KCF_MECH2CLASS(mech_type);
	if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
		*error = CRYPTO_MECHANISM_INVALID;
		return (NULL);
	}

	me_tab = &kcf_mech_tabs_tab[class];
	index = KCF_MECH2INDEX(mech_type);
	if ((index < 0) || (index >= me_tab->met_size)) {
		*error = CRYPTO_MECHANISM_INVALID;
		return (NULL);
	}

	me = &((me_tab->met_tab)[index]);
	if (mepp != NULL)
		*mepp = me;

	mutex_enter(&me->me_mutex);

	prov_chain = me->me_hw_prov_chain;

	/*
	 * We check for the threshold for using a hardware provider for
	 * this amount of data. If there is no software provider available
	 * for the mechanism, then the threshold is ignored.
	 */
	if ((prov_chain != NULL) &&
	    ((data_size == 0) || (me->me_threshold == 0) ||
	    (data_size > me->me_threshold) ||
	    ((mdesc = me->me_sw_prov) == NULL) ||
	    (!IS_FG_SUPPORTED(mdesc, fg)) ||
	    (!KCF_IS_PROV_USABLE(mdesc->pm_prov_desc)))) {
		ASSERT(me->me_num_hwprov > 0);
		/* there is at least one provider */

		/*
		 * Find the least loaded provider. tq_nalloc gives
		 * the number of task entries in the task queue. We do
		 * not acquire tq_lock here as it is not critical to
		 * get the exact number and the lock contention may be
		 * too costly for this code path.
		 */
		while (prov_chain != NULL) {
			pd = prov_chain->pm_prov_desc;

			/* skip unusable, already-tried or restricted pds */
			if (!IS_FG_SUPPORTED(prov_chain, fg) ||
			    !KCF_IS_PROV_USABLE(pd) ||
			    IS_PROVIDER_TRIED(pd, triedl) ||
			    (call_restrict && pd->pd_restricted)) {
				prov_chain = prov_chain->pm_next;
				continue;
			}

			if ((len = pd->pd_sched_info.ks_taskq->tq_nalloc)
			    < gqlen) {
				gqlen = len;
				gpd = pd;
			}

			prov_chain = prov_chain->pm_next;
		}

		/* NULL when every HW candidate was filtered out above */
		pd = gpd;
	}

	/* No HW provider for this mech, is there a SW provider? */
	if (pd == NULL && (mdesc = me->me_sw_prov) != NULL) {
		pd = mdesc->pm_prov_desc;
		if (!IS_FG_SUPPORTED(mdesc, fg) ||
		    !KCF_IS_PROV_USABLE(pd) ||
		    IS_PROVIDER_TRIED(pd, triedl) ||
		    (call_restrict && pd->pd_restricted))
			pd = NULL;
	}

	if (pd == NULL) {
		/*
		 * We do not want to report CRYPTO_MECH_NOT_SUPPORTED, when
		 * we are in the "fallback to the next provider" case. Rather
		 * we preserve the error, so that the client gets the right
		 * error code.
		 */
		if (triedl == NULL)
			*error = CRYPTO_MECH_NOT_SUPPORTED;
	} else
		KCF_PROV_REFHOLD(pd);

	mutex_exit(&me->me_mutex);
	return (pd);
}

/*
 * Very similar to kcf_get_mech_provider(). Finds the best provider capable of
 * a dual operation with both me1 and me2.
 * When no dual-ops capable providers are available, return the best provider
 * for me1 only, and sets *prov_mt2 to CRYPTO_INVALID_MECHID;
 * We assume/expect that a slower HW capable of the dual is still
 * faster than the 2 fastest providers capable of the individual ops
 * separately.
 */
kcf_provider_desc_t *
kcf_get_dual_provider(crypto_mechanism_t *mech1, crypto_mechanism_t *mech2,
    kcf_mech_entry_t **mepp, crypto_mech_type_t *prov_mt1,
    crypto_mech_type_t *prov_mt2, int *error, kcf_prov_tried_t *triedl,
    crypto_func_group_t fg1, crypto_func_group_t fg2, boolean_t call_restrict,
    size_t data_size)
{
	kcf_provider_desc_t *pd = NULL, *pdm1 = NULL, *pdm1m2 = NULL;
	kcf_prov_mech_desc_t *prov_chain, *mdesc;
	int len, gqlen = INT_MAX, dgqlen = INT_MAX;
	crypto_mech_info_list_t *mil;
	crypto_mech_type_t m2id = mech2->cm_type;
	kcf_mech_entry_t *me;

	/* when mech is a valid mechanism, me will be its mech_entry */
	if (kcf_get_mech_entry(mech1->cm_type, &me) != KCF_SUCCESS) {
		*error = CRYPTO_MECHANISM_INVALID;
		return (NULL);
	}

	/* default: no provider found that can do both mechanisms */
	*prov_mt2 = CRYPTO_MECH_INVALID;

	if (mepp != NULL)
		*mepp = me;
	mutex_enter(&me->me_mutex);

	prov_chain = me->me_hw_prov_chain;
	/*
	 * We check the threshold for using a hardware provider for
	 * this amount of data. If there is no software provider available
	 * for the first mechanism, then the threshold is ignored.
	 */
	if ((prov_chain != NULL) &&
	    ((data_size == 0) || (me->me_threshold == 0) ||
	    (data_size > me->me_threshold) ||
	    ((mdesc = me->me_sw_prov) == NULL) ||
	    (!IS_FG_SUPPORTED(mdesc, fg1)) ||
	    (!KCF_IS_PROV_USABLE(mdesc->pm_prov_desc)))) {
		/* there is at least one provider */
		ASSERT(me->me_num_hwprov > 0);

		/*
		 * Find the least loaded provider capable of the combo
		 * me1 + me2, and save a pointer to the least loaded
		 * provider capable of me1 only.
		 */
		while (prov_chain != NULL) {
			pd = prov_chain->pm_prov_desc;
			/* queue length; read without tq_lock, as elsewhere */
			len = pd->pd_sched_info.ks_taskq->tq_nalloc;

			if (!IS_FG_SUPPORTED(prov_chain, fg1) ||
			    !KCF_IS_PROV_USABLE(pd) ||
			    IS_PROVIDER_TRIED(pd, triedl) ||
			    (call_restrict && pd->pd_restricted)) {
				prov_chain = prov_chain->pm_next;
				continue;
			}

			/* Save the best provider capable of m1 */
			if (len < gqlen) {
				*prov_mt1 =
				    prov_chain->pm_mech_info.cm_mech_number;
				gqlen = len;
				pdm1 = pd;
			}

			/* See if pd can do me2 too */
			for (mil = prov_chain->pm_mi_list;
			    mil != NULL; mil = mil->ml_next) {
				if ((mil->ml_mech_info.cm_func_group_mask &
				    fg2) == 0)
					continue;

				if ((mil->ml_kcf_mechid == m2id) &&
				    (len < dgqlen)) {
					/* Bingo! */
					dgqlen = len;
					pdm1m2 = pd;
					*prov_mt2 =
					    mil->ml_mech_info.cm_mech_number;
					*prov_mt1 = prov_chain->
					    pm_mech_info.cm_mech_number;
					break;
				}
			}

			prov_chain = prov_chain->pm_next;
		}

		/* prefer a dual-capable provider over an m1-only one */
		pd = (pdm1m2 != NULL) ? pdm1m2 : pdm1;
	}

	/* no HW provider for this mech, is there a SW provider? */
	if (pd == NULL && (mdesc = me->me_sw_prov) != NULL) {
		pd = mdesc->pm_prov_desc;
		if (!IS_FG_SUPPORTED(mdesc, fg1) ||
		    !KCF_IS_PROV_USABLE(pd) ||
		    IS_PROVIDER_TRIED(pd, triedl) ||
		    (call_restrict && pd->pd_restricted))
			pd = NULL;
		else {
			/* See if pd can do me2 too */
			for (mil = me->me_sw_prov->pm_mi_list;
			    mil != NULL; mil = mil->ml_next) {
				if ((mil->ml_mech_info.cm_func_group_mask &
				    fg2) == 0)
					continue;

				if (mil->ml_kcf_mechid == m2id) {
					/* Bingo!
*/ 653 *prov_mt2 = 654 mil->ml_mech_info.cm_mech_number; 655 break; 656 } 657 } 658 *prov_mt1 = me->me_sw_prov->pm_mech_info.cm_mech_number; 659 } 660 } 661 662 if (pd == NULL) 663 *error = CRYPTO_MECH_NOT_SUPPORTED; 664 else 665 KCF_PROV_REFHOLD(pd); 666 667 mutex_exit(&me->me_mutex); 668 return (pd); 669 } 670 671 /* 672 * Do the actual work of calling the provider routines. 673 * 674 * pd - Provider structure 675 * ctx - Context for this operation 676 * params - Parameters for this operation 677 * rhndl - Request handle to use for notification 678 * 679 * The return values are the same as that of the respective SPI. 680 */ 681 int 682 common_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx, 683 kcf_req_params_t *params, crypto_req_handle_t rhndl) 684 { 685 int err = CRYPTO_ARGUMENTS_BAD; 686 kcf_op_type_t optype; 687 688 optype = params->rp_optype; 689 690 switch (params->rp_opgrp) { 691 case KCF_OG_DIGEST: { 692 kcf_digest_ops_params_t *dops = ¶ms->rp_u.digest_params; 693 694 switch (optype) { 695 case KCF_OP_INIT: 696 /* 697 * We should do this only here and not in KCF_WRAP_* 698 * macros. This is because we may want to try other 699 * providers, in case we recover from a failure. 
700 */ 701 KCF_SET_PROVIDER_MECHNUM(dops->do_framework_mechtype, 702 pd, &dops->do_mech); 703 704 err = KCF_PROV_DIGEST_INIT(pd, ctx, &dops->do_mech, 705 rhndl); 706 break; 707 708 case KCF_OP_SINGLE: 709 err = KCF_PROV_DIGEST(pd, ctx, dops->do_data, 710 dops->do_digest, rhndl); 711 break; 712 713 case KCF_OP_UPDATE: 714 err = KCF_PROV_DIGEST_UPDATE(pd, ctx, 715 dops->do_data, rhndl); 716 break; 717 718 case KCF_OP_FINAL: 719 err = KCF_PROV_DIGEST_FINAL(pd, ctx, 720 dops->do_digest, rhndl); 721 break; 722 723 case KCF_OP_ATOMIC: 724 ASSERT(ctx == NULL); 725 KCF_SET_PROVIDER_MECHNUM(dops->do_framework_mechtype, 726 pd, &dops->do_mech); 727 err = KCF_PROV_DIGEST_ATOMIC(pd, dops->do_sid, 728 &dops->do_mech, dops->do_data, dops->do_digest, 729 rhndl); 730 break; 731 732 case KCF_OP_DIGEST_KEY: 733 err = KCF_PROV_DIGEST_KEY(pd, ctx, dops->do_digest_key, 734 rhndl); 735 break; 736 737 default: 738 break; 739 } 740 break; 741 } 742 743 case KCF_OG_MAC: { 744 kcf_mac_ops_params_t *mops = ¶ms->rp_u.mac_params; 745 746 switch (optype) { 747 case KCF_OP_INIT: 748 KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype, 749 pd, &mops->mo_mech); 750 751 err = KCF_PROV_MAC_INIT(pd, ctx, &mops->mo_mech, 752 mops->mo_key, mops->mo_templ, rhndl); 753 break; 754 755 case KCF_OP_SINGLE: 756 err = KCF_PROV_MAC(pd, ctx, mops->mo_data, 757 mops->mo_mac, rhndl); 758 break; 759 760 case KCF_OP_UPDATE: 761 err = KCF_PROV_MAC_UPDATE(pd, ctx, mops->mo_data, 762 rhndl); 763 break; 764 765 case KCF_OP_FINAL: 766 err = KCF_PROV_MAC_FINAL(pd, ctx, mops->mo_mac, rhndl); 767 break; 768 769 case KCF_OP_ATOMIC: 770 ASSERT(ctx == NULL); 771 KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype, 772 pd, &mops->mo_mech); 773 774 err = KCF_PROV_MAC_ATOMIC(pd, mops->mo_sid, 775 &mops->mo_mech, mops->mo_key, mops->mo_data, 776 mops->mo_mac, mops->mo_templ, rhndl); 777 break; 778 779 case KCF_OP_MAC_VERIFY_ATOMIC: 780 ASSERT(ctx == NULL); 781 KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype, 782 pd, 
&mops->mo_mech); 783 784 err = KCF_PROV_MAC_VERIFY_ATOMIC(pd, mops->mo_sid, 785 &mops->mo_mech, mops->mo_key, mops->mo_data, 786 mops->mo_mac, mops->mo_templ, rhndl); 787 break; 788 789 default: 790 break; 791 } 792 break; 793 } 794 795 case KCF_OG_ENCRYPT: { 796 kcf_encrypt_ops_params_t *eops = ¶ms->rp_u.encrypt_params; 797 798 switch (optype) { 799 case KCF_OP_INIT: 800 KCF_SET_PROVIDER_MECHNUM(eops->eo_framework_mechtype, 801 pd, &eops->eo_mech); 802 803 err = KCF_PROV_ENCRYPT_INIT(pd, ctx, &eops->eo_mech, 804 eops->eo_key, eops->eo_templ, rhndl); 805 break; 806 807 case KCF_OP_SINGLE: 808 err = KCF_PROV_ENCRYPT(pd, ctx, eops->eo_plaintext, 809 eops->eo_ciphertext, rhndl); 810 break; 811 812 case KCF_OP_UPDATE: 813 err = KCF_PROV_ENCRYPT_UPDATE(pd, ctx, 814 eops->eo_plaintext, eops->eo_ciphertext, rhndl); 815 break; 816 817 case KCF_OP_FINAL: 818 err = KCF_PROV_ENCRYPT_FINAL(pd, ctx, 819 eops->eo_ciphertext, rhndl); 820 break; 821 822 case KCF_OP_ATOMIC: 823 ASSERT(ctx == NULL); 824 KCF_SET_PROVIDER_MECHNUM(eops->eo_framework_mechtype, 825 pd, &eops->eo_mech); 826 827 err = KCF_PROV_ENCRYPT_ATOMIC(pd, eops->eo_sid, 828 &eops->eo_mech, eops->eo_key, eops->eo_plaintext, 829 eops->eo_ciphertext, eops->eo_templ, rhndl); 830 break; 831 832 default: 833 break; 834 } 835 break; 836 } 837 838 case KCF_OG_DECRYPT: { 839 kcf_decrypt_ops_params_t *dcrops = ¶ms->rp_u.decrypt_params; 840 841 switch (optype) { 842 case KCF_OP_INIT: 843 KCF_SET_PROVIDER_MECHNUM(dcrops->dop_framework_mechtype, 844 pd, &dcrops->dop_mech); 845 846 err = KCF_PROV_DECRYPT_INIT(pd, ctx, &dcrops->dop_mech, 847 dcrops->dop_key, dcrops->dop_templ, rhndl); 848 break; 849 850 case KCF_OP_SINGLE: 851 err = KCF_PROV_DECRYPT(pd, ctx, dcrops->dop_ciphertext, 852 dcrops->dop_plaintext, rhndl); 853 break; 854 855 case KCF_OP_UPDATE: 856 err = KCF_PROV_DECRYPT_UPDATE(pd, ctx, 857 dcrops->dop_ciphertext, dcrops->dop_plaintext, 858 rhndl); 859 break; 860 861 case KCF_OP_FINAL: 862 err = KCF_PROV_DECRYPT_FINAL(pd, 
ctx, 863 dcrops->dop_plaintext, rhndl); 864 break; 865 866 case KCF_OP_ATOMIC: 867 ASSERT(ctx == NULL); 868 KCF_SET_PROVIDER_MECHNUM(dcrops->dop_framework_mechtype, 869 pd, &dcrops->dop_mech); 870 871 err = KCF_PROV_DECRYPT_ATOMIC(pd, dcrops->dop_sid, 872 &dcrops->dop_mech, dcrops->dop_key, 873 dcrops->dop_ciphertext, dcrops->dop_plaintext, 874 dcrops->dop_templ, rhndl); 875 break; 876 877 default: 878 break; 879 } 880 break; 881 } 882 883 case KCF_OG_SIGN: { 884 kcf_sign_ops_params_t *sops = ¶ms->rp_u.sign_params; 885 886 switch (optype) { 887 case KCF_OP_INIT: 888 KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype, 889 pd, &sops->so_mech); 890 891 err = KCF_PROV_SIGN_INIT(pd, ctx, &sops->so_mech, 892 sops->so_key, sops->so_templ, rhndl); 893 break; 894 895 case KCF_OP_SIGN_RECOVER_INIT: 896 KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype, 897 pd, &sops->so_mech); 898 899 err = KCF_PROV_SIGN_RECOVER_INIT(pd, ctx, 900 &sops->so_mech, sops->so_key, sops->so_templ, 901 rhndl); 902 break; 903 904 case KCF_OP_SINGLE: 905 err = KCF_PROV_SIGN(pd, ctx, sops->so_data, 906 sops->so_signature, rhndl); 907 break; 908 909 case KCF_OP_SIGN_RECOVER: 910 err = KCF_PROV_SIGN_RECOVER(pd, ctx, 911 sops->so_data, sops->so_signature, rhndl); 912 break; 913 914 case KCF_OP_UPDATE: 915 err = KCF_PROV_SIGN_UPDATE(pd, ctx, sops->so_data, 916 rhndl); 917 break; 918 919 case KCF_OP_FINAL: 920 err = KCF_PROV_SIGN_FINAL(pd, ctx, sops->so_signature, 921 rhndl); 922 break; 923 924 case KCF_OP_ATOMIC: 925 ASSERT(ctx == NULL); 926 KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype, 927 pd, &sops->so_mech); 928 929 err = KCF_PROV_SIGN_ATOMIC(pd, sops->so_sid, 930 &sops->so_mech, sops->so_key, sops->so_data, 931 sops->so_templ, sops->so_signature, rhndl); 932 break; 933 934 case KCF_OP_SIGN_RECOVER_ATOMIC: 935 ASSERT(ctx == NULL); 936 KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype, 937 pd, &sops->so_mech); 938 939 err = KCF_PROV_SIGN_RECOVER_ATOMIC(pd, sops->so_sid, 940 
&sops->so_mech, sops->so_key, sops->so_data, 941 sops->so_templ, sops->so_signature, rhndl); 942 break; 943 944 default: 945 break; 946 } 947 break; 948 } 949 950 case KCF_OG_VERIFY: { 951 kcf_verify_ops_params_t *vops = ¶ms->rp_u.verify_params; 952 953 switch (optype) { 954 case KCF_OP_INIT: 955 KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype, 956 pd, &vops->vo_mech); 957 958 err = KCF_PROV_VERIFY_INIT(pd, ctx, &vops->vo_mech, 959 vops->vo_key, vops->vo_templ, rhndl); 960 break; 961 962 case KCF_OP_VERIFY_RECOVER_INIT: 963 KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype, 964 pd, &vops->vo_mech); 965 966 err = KCF_PROV_VERIFY_RECOVER_INIT(pd, ctx, 967 &vops->vo_mech, vops->vo_key, vops->vo_templ, 968 rhndl); 969 break; 970 971 case KCF_OP_SINGLE: 972 err = KCF_PROV_VERIFY(pd, ctx, vops->vo_data, 973 vops->vo_signature, rhndl); 974 break; 975 976 case KCF_OP_VERIFY_RECOVER: 977 err = KCF_PROV_VERIFY_RECOVER(pd, ctx, 978 vops->vo_signature, vops->vo_data, rhndl); 979 break; 980 981 case KCF_OP_UPDATE: 982 err = KCF_PROV_VERIFY_UPDATE(pd, ctx, vops->vo_data, 983 rhndl); 984 break; 985 986 case KCF_OP_FINAL: 987 err = KCF_PROV_VERIFY_FINAL(pd, ctx, vops->vo_signature, 988 rhndl); 989 break; 990 991 case KCF_OP_ATOMIC: 992 ASSERT(ctx == NULL); 993 KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype, 994 pd, &vops->vo_mech); 995 996 err = KCF_PROV_VERIFY_ATOMIC(pd, vops->vo_sid, 997 &vops->vo_mech, vops->vo_key, vops->vo_data, 998 vops->vo_templ, vops->vo_signature, rhndl); 999 break; 1000 1001 case KCF_OP_VERIFY_RECOVER_ATOMIC: 1002 ASSERT(ctx == NULL); 1003 KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype, 1004 pd, &vops->vo_mech); 1005 1006 err = KCF_PROV_VERIFY_RECOVER_ATOMIC(pd, vops->vo_sid, 1007 &vops->vo_mech, vops->vo_key, vops->vo_signature, 1008 vops->vo_templ, vops->vo_data, rhndl); 1009 break; 1010 1011 default: 1012 break; 1013 } 1014 break; 1015 } 1016 1017 case KCF_OG_ENCRYPT_MAC: { 1018 kcf_encrypt_mac_ops_params_t *eops = 1019 
¶ms->rp_u.encrypt_mac_params; 1020 kcf_context_t *kcf_secondctx; 1021 1022 switch (optype) { 1023 case KCF_OP_INIT: 1024 kcf_secondctx = ((kcf_context_t *) 1025 (ctx->cc_framework_private))->kc_secondctx; 1026 1027 if (kcf_secondctx != NULL) { 1028 err = kcf_emulate_dual(pd, ctx, params); 1029 break; 1030 } 1031 KCF_SET_PROVIDER_MECHNUM( 1032 eops->em_framework_encr_mechtype, 1033 pd, &eops->em_encr_mech); 1034 1035 KCF_SET_PROVIDER_MECHNUM( 1036 eops->em_framework_mac_mechtype, 1037 pd, &eops->em_mac_mech); 1038 1039 err = KCF_PROV_ENCRYPT_MAC_INIT(pd, ctx, 1040 &eops->em_encr_mech, eops->em_encr_key, 1041 &eops->em_mac_mech, eops->em_mac_key, 1042 eops->em_encr_templ, eops->em_mac_templ, 1043 rhndl); 1044 1045 break; 1046 1047 case KCF_OP_SINGLE: 1048 err = KCF_PROV_ENCRYPT_MAC(pd, ctx, 1049 eops->em_plaintext, eops->em_ciphertext, 1050 eops->em_mac, rhndl); 1051 break; 1052 1053 case KCF_OP_UPDATE: 1054 kcf_secondctx = ((kcf_context_t *) 1055 (ctx->cc_framework_private))->kc_secondctx; 1056 if (kcf_secondctx != NULL) { 1057 err = kcf_emulate_dual(pd, ctx, params); 1058 break; 1059 } 1060 err = KCF_PROV_ENCRYPT_MAC_UPDATE(pd, ctx, 1061 eops->em_plaintext, eops->em_ciphertext, rhndl); 1062 break; 1063 1064 case KCF_OP_FINAL: 1065 kcf_secondctx = ((kcf_context_t *) 1066 (ctx->cc_framework_private))->kc_secondctx; 1067 if (kcf_secondctx != NULL) { 1068 err = kcf_emulate_dual(pd, ctx, params); 1069 break; 1070 } 1071 err = KCF_PROV_ENCRYPT_MAC_FINAL(pd, ctx, 1072 eops->em_ciphertext, eops->em_mac, rhndl); 1073 break; 1074 1075 case KCF_OP_ATOMIC: 1076 ASSERT(ctx == NULL); 1077 1078 KCF_SET_PROVIDER_MECHNUM( 1079 eops->em_framework_encr_mechtype, 1080 pd, &eops->em_encr_mech); 1081 1082 KCF_SET_PROVIDER_MECHNUM( 1083 eops->em_framework_mac_mechtype, 1084 pd, &eops->em_mac_mech); 1085 1086 err = KCF_PROV_ENCRYPT_MAC_ATOMIC(pd, eops->em_sid, 1087 &eops->em_encr_mech, eops->em_encr_key, 1088 &eops->em_mac_mech, eops->em_mac_key, 1089 eops->em_plaintext, 
eops->em_ciphertext, 1090 eops->em_mac, 1091 eops->em_encr_templ, eops->em_mac_templ, 1092 rhndl); 1093 1094 break; 1095 1096 default: 1097 break; 1098 } 1099 break; 1100 } 1101 1102 case KCF_OG_MAC_DECRYPT: { 1103 kcf_mac_decrypt_ops_params_t *dops = 1104 ¶ms->rp_u.mac_decrypt_params; 1105 kcf_context_t *kcf_secondctx; 1106 1107 switch (optype) { 1108 case KCF_OP_INIT: 1109 kcf_secondctx = ((kcf_context_t *) 1110 (ctx->cc_framework_private))->kc_secondctx; 1111 1112 if (kcf_secondctx != NULL) { 1113 err = kcf_emulate_dual(pd, ctx, params); 1114 break; 1115 } 1116 KCF_SET_PROVIDER_MECHNUM( 1117 dops->md_framework_mac_mechtype, 1118 pd, &dops->md_mac_mech); 1119 1120 KCF_SET_PROVIDER_MECHNUM( 1121 dops->md_framework_decr_mechtype, 1122 pd, &dops->md_decr_mech); 1123 1124 err = KCF_PROV_MAC_DECRYPT_INIT(pd, ctx, 1125 &dops->md_mac_mech, dops->md_mac_key, 1126 &dops->md_decr_mech, dops->md_decr_key, 1127 dops->md_mac_templ, dops->md_decr_templ, 1128 rhndl); 1129 1130 break; 1131 1132 case KCF_OP_SINGLE: 1133 err = KCF_PROV_MAC_DECRYPT(pd, ctx, 1134 dops->md_ciphertext, dops->md_mac, 1135 dops->md_plaintext, rhndl); 1136 break; 1137 1138 case KCF_OP_UPDATE: 1139 kcf_secondctx = ((kcf_context_t *) 1140 (ctx->cc_framework_private))->kc_secondctx; 1141 if (kcf_secondctx != NULL) { 1142 err = kcf_emulate_dual(pd, ctx, params); 1143 break; 1144 } 1145 err = KCF_PROV_MAC_DECRYPT_UPDATE(pd, ctx, 1146 dops->md_ciphertext, dops->md_plaintext, rhndl); 1147 break; 1148 1149 case KCF_OP_FINAL: 1150 kcf_secondctx = ((kcf_context_t *) 1151 (ctx->cc_framework_private))->kc_secondctx; 1152 if (kcf_secondctx != NULL) { 1153 err = kcf_emulate_dual(pd, ctx, params); 1154 break; 1155 } 1156 err = KCF_PROV_MAC_DECRYPT_FINAL(pd, ctx, 1157 dops->md_mac, dops->md_plaintext, rhndl); 1158 break; 1159 1160 case KCF_OP_ATOMIC: 1161 ASSERT(ctx == NULL); 1162 1163 KCF_SET_PROVIDER_MECHNUM( 1164 dops->md_framework_mac_mechtype, 1165 pd, &dops->md_mac_mech); 1166 1167 KCF_SET_PROVIDER_MECHNUM( 1168 
dops->md_framework_decr_mechtype, 1169 pd, &dops->md_decr_mech); 1170 1171 err = KCF_PROV_MAC_DECRYPT_ATOMIC(pd, dops->md_sid, 1172 &dops->md_mac_mech, dops->md_mac_key, 1173 &dops->md_decr_mech, dops->md_decr_key, 1174 dops->md_ciphertext, dops->md_mac, 1175 dops->md_plaintext, 1176 dops->md_mac_templ, dops->md_decr_templ, 1177 rhndl); 1178 1179 break; 1180 1181 case KCF_OP_MAC_VERIFY_DECRYPT_ATOMIC: 1182 ASSERT(ctx == NULL); 1183 1184 KCF_SET_PROVIDER_MECHNUM( 1185 dops->md_framework_mac_mechtype, 1186 pd, &dops->md_mac_mech); 1187 1188 KCF_SET_PROVIDER_MECHNUM( 1189 dops->md_framework_decr_mechtype, 1190 pd, &dops->md_decr_mech); 1191 1192 err = KCF_PROV_MAC_VERIFY_DECRYPT_ATOMIC(pd, 1193 dops->md_sid, &dops->md_mac_mech, dops->md_mac_key, 1194 &dops->md_decr_mech, dops->md_decr_key, 1195 dops->md_ciphertext, dops->md_mac, 1196 dops->md_plaintext, 1197 dops->md_mac_templ, dops->md_decr_templ, 1198 rhndl); 1199 1200 break; 1201 1202 default: 1203 break; 1204 } 1205 break; 1206 } 1207 1208 case KCF_OG_KEY: { 1209 kcf_key_ops_params_t *kops = ¶ms->rp_u.key_params; 1210 1211 ASSERT(ctx == NULL); 1212 KCF_SET_PROVIDER_MECHNUM(kops->ko_framework_mechtype, pd, 1213 &kops->ko_mech); 1214 1215 switch (optype) { 1216 case KCF_OP_KEY_GENERATE: 1217 err = KCF_PROV_KEY_GENERATE(pd, kops->ko_sid, 1218 &kops->ko_mech, 1219 kops->ko_key_template, kops->ko_key_attribute_count, 1220 kops->ko_key_object_id_ptr, rhndl); 1221 break; 1222 1223 case KCF_OP_KEY_GENERATE_PAIR: 1224 err = KCF_PROV_KEY_GENERATE_PAIR(pd, kops->ko_sid, 1225 &kops->ko_mech, 1226 kops->ko_key_template, kops->ko_key_attribute_count, 1227 kops->ko_private_key_template, 1228 kops->ko_private_key_attribute_count, 1229 kops->ko_key_object_id_ptr, 1230 kops->ko_private_key_object_id_ptr, rhndl); 1231 break; 1232 1233 case KCF_OP_KEY_WRAP: 1234 err = KCF_PROV_KEY_WRAP(pd, kops->ko_sid, 1235 &kops->ko_mech, 1236 kops->ko_key, kops->ko_key_object_id_ptr, 1237 kops->ko_wrapped_key, kops->ko_wrapped_key_len_ptr, 1238 
rhndl); 1239 break; 1240 1241 case KCF_OP_KEY_UNWRAP: 1242 err = KCF_PROV_KEY_UNWRAP(pd, kops->ko_sid, 1243 &kops->ko_mech, 1244 kops->ko_key, kops->ko_wrapped_key, 1245 kops->ko_wrapped_key_len_ptr, 1246 kops->ko_key_template, kops->ko_key_attribute_count, 1247 kops->ko_key_object_id_ptr, rhndl); 1248 break; 1249 1250 case KCF_OP_KEY_DERIVE: 1251 err = KCF_PROV_KEY_DERIVE(pd, kops->ko_sid, 1252 &kops->ko_mech, 1253 kops->ko_key, kops->ko_key_template, 1254 kops->ko_key_attribute_count, 1255 kops->ko_key_object_id_ptr, rhndl); 1256 break; 1257 1258 default: 1259 break; 1260 } 1261 break; 1262 } 1263 1264 case KCF_OG_RANDOM: { 1265 kcf_random_number_ops_params_t *rops = 1266 ¶ms->rp_u.random_number_params; 1267 1268 ASSERT(ctx == NULL); 1269 1270 switch (optype) { 1271 case KCF_OP_RANDOM_SEED: 1272 err = KCF_PROV_SEED_RANDOM(pd, rops->rn_sid, 1273 rops->rn_buf, rops->rn_buflen, rops->rn_entropy_est, 1274 rops->rn_flags, rhndl); 1275 break; 1276 1277 case KCF_OP_RANDOM_GENERATE: 1278 err = KCF_PROV_GENERATE_RANDOM(pd, rops->rn_sid, 1279 rops->rn_buf, rops->rn_buflen, rhndl); 1280 break; 1281 1282 default: 1283 break; 1284 } 1285 break; 1286 } 1287 1288 case KCF_OG_SESSION: { 1289 kcf_session_ops_params_t *sops = ¶ms->rp_u.session_params; 1290 1291 ASSERT(ctx == NULL); 1292 switch (optype) { 1293 case KCF_OP_SESSION_OPEN: 1294 /* 1295 * so_pd may be a logical provider, in which case 1296 * we need to check whether it has been removed. 1297 */ 1298 if (KCF_IS_PROV_REMOVED(sops->so_pd)) { 1299 err = CRYPTO_DEVICE_ERROR; 1300 break; 1301 } 1302 err = KCF_PROV_SESSION_OPEN(pd, sops->so_sid_ptr, 1303 rhndl, sops->so_pd); 1304 break; 1305 1306 case KCF_OP_SESSION_CLOSE: 1307 /* 1308 * so_pd may be a logical provider, in which case 1309 * we need to check whether it has been removed. 
1310 */ 1311 if (KCF_IS_PROV_REMOVED(sops->so_pd)) { 1312 err = CRYPTO_DEVICE_ERROR; 1313 break; 1314 } 1315 err = KCF_PROV_SESSION_CLOSE(pd, sops->so_sid, 1316 rhndl, sops->so_pd); 1317 break; 1318 1319 case KCF_OP_SESSION_LOGIN: 1320 err = KCF_PROV_SESSION_LOGIN(pd, sops->so_sid, 1321 sops->so_user_type, sops->so_pin, 1322 sops->so_pin_len, rhndl); 1323 break; 1324 1325 case KCF_OP_SESSION_LOGOUT: 1326 err = KCF_PROV_SESSION_LOGOUT(pd, sops->so_sid, rhndl); 1327 break; 1328 1329 default: 1330 break; 1331 } 1332 break; 1333 } 1334 1335 case KCF_OG_OBJECT: { 1336 kcf_object_ops_params_t *jops = ¶ms->rp_u.object_params; 1337 1338 ASSERT(ctx == NULL); 1339 switch (optype) { 1340 case KCF_OP_OBJECT_CREATE: 1341 err = KCF_PROV_OBJECT_CREATE(pd, jops->oo_sid, 1342 jops->oo_template, jops->oo_attribute_count, 1343 jops->oo_object_id_ptr, rhndl); 1344 break; 1345 1346 case KCF_OP_OBJECT_COPY: 1347 err = KCF_PROV_OBJECT_COPY(pd, jops->oo_sid, 1348 jops->oo_object_id, 1349 jops->oo_template, jops->oo_attribute_count, 1350 jops->oo_object_id_ptr, rhndl); 1351 break; 1352 1353 case KCF_OP_OBJECT_DESTROY: 1354 err = KCF_PROV_OBJECT_DESTROY(pd, jops->oo_sid, 1355 jops->oo_object_id, rhndl); 1356 break; 1357 1358 case KCF_OP_OBJECT_GET_SIZE: 1359 err = KCF_PROV_OBJECT_GET_SIZE(pd, jops->oo_sid, 1360 jops->oo_object_id, jops->oo_object_size, rhndl); 1361 break; 1362 1363 case KCF_OP_OBJECT_GET_ATTRIBUTE_VALUE: 1364 err = KCF_PROV_OBJECT_GET_ATTRIBUTE_VALUE(pd, 1365 jops->oo_sid, jops->oo_object_id, 1366 jops->oo_template, jops->oo_attribute_count, rhndl); 1367 break; 1368 1369 case KCF_OP_OBJECT_SET_ATTRIBUTE_VALUE: 1370 err = KCF_PROV_OBJECT_SET_ATTRIBUTE_VALUE(pd, 1371 jops->oo_sid, jops->oo_object_id, 1372 jops->oo_template, jops->oo_attribute_count, rhndl); 1373 break; 1374 1375 case KCF_OP_OBJECT_FIND_INIT: 1376 err = KCF_PROV_OBJECT_FIND_INIT(pd, jops->oo_sid, 1377 jops->oo_template, jops->oo_attribute_count, 1378 jops->oo_find_init_pp_ptr, rhndl); 1379 break; 1380 1381 
case KCF_OP_OBJECT_FIND: 1382 err = KCF_PROV_OBJECT_FIND(pd, jops->oo_find_pp, 1383 jops->oo_object_id_ptr, jops->oo_max_object_count, 1384 jops->oo_object_count_ptr, rhndl); 1385 break; 1386 1387 case KCF_OP_OBJECT_FIND_FINAL: 1388 err = KCF_PROV_OBJECT_FIND_FINAL(pd, jops->oo_find_pp, 1389 rhndl); 1390 break; 1391 1392 default: 1393 break; 1394 } 1395 break; 1396 } 1397 1398 case KCF_OG_PROVMGMT: { 1399 kcf_provmgmt_ops_params_t *pops = ¶ms->rp_u.provmgmt_params; 1400 1401 ASSERT(ctx == NULL); 1402 switch (optype) { 1403 case KCF_OP_MGMT_EXTINFO: 1404 /* 1405 * po_pd may be a logical provider, in which case 1406 * we need to check whether it has been removed. 1407 */ 1408 if (KCF_IS_PROV_REMOVED(pops->po_pd)) { 1409 err = CRYPTO_DEVICE_ERROR; 1410 break; 1411 } 1412 err = KCF_PROV_EXT_INFO(pd, pops->po_ext_info, rhndl, 1413 pops->po_pd); 1414 break; 1415 1416 case KCF_OP_MGMT_INITTOKEN: 1417 err = KCF_PROV_INIT_TOKEN(pd, pops->po_pin, 1418 pops->po_pin_len, pops->po_label, rhndl); 1419 break; 1420 1421 case KCF_OP_MGMT_INITPIN: 1422 err = KCF_PROV_INIT_PIN(pd, pops->po_sid, pops->po_pin, 1423 pops->po_pin_len, rhndl); 1424 break; 1425 1426 case KCF_OP_MGMT_SETPIN: 1427 err = KCF_PROV_SET_PIN(pd, pops->po_sid, 1428 pops->po_old_pin, pops->po_old_pin_len, 1429 pops->po_pin, pops->po_pin_len, rhndl); 1430 break; 1431 1432 default: 1433 break; 1434 } 1435 break; 1436 } 1437 1438 default: 1439 break; 1440 } /* end of switch(params->rp_opgrp) */ 1441 1442 KCF_PROV_INCRSTATS(pd, err); 1443 return (err); 1444 } 1445 1446 /* 1447 * Emulate the call for a multipart dual ops with 2 single steps. 1448 * This routine is always called in the context of a working thread 1449 * running kcf_svc_do_run(). 1450 * The single steps are submitted in a pure synchronous way (blocking). 1451 * When this routine returns, kcf_svc_do_run() will call kcf_aop_done() 1452 * so the originating consumer's callback gets invoked. kcf_aop_done() 1453 * takes care of freeing the operation context. 
 * So, this routine does not free the operation context.
 *
 * The provider descriptor is assumed held by the callers.
 */
static int
kcf_emulate_dual(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
    kcf_req_params_t *params)
{
	int err = CRYPTO_ARGUMENTS_BAD;
	kcf_op_type_t optype;
	/*
	 * Scratch space used to temporarily redirect a crypto_dual_data_t's
	 * primary (offset1/len1) window to its secondary (offset2/len2)
	 * window and restore it afterwards.
	 */
	size_t save_len;
	off_t save_offset;

	optype = params->rp_optype;

	switch (params->rp_opgrp) {
	case KCF_OG_ENCRYPT_MAC: {
		/* encrypt-then-MAC: emulate with an encrypt step + a MAC step */
		kcf_encrypt_mac_ops_params_t *cmops =
		    &params->rp_u.encrypt_mac_params;
		kcf_context_t *encr_kcf_ctx;
		crypto_ctx_t *mac_ctx;
		kcf_req_params_t encr_params;

		encr_kcf_ctx = (kcf_context_t *)(ctx->cc_framework_private);

		switch (optype) {
		case KCF_OP_INIT: {
			/*
			 * No secondary (MAC) context yet; it is attached
			 * below only if both single-op inits succeed.
			 */
			encr_kcf_ctx->kc_secondctx = NULL;

			/* Initialize the encryption half on this provider. */
			KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_INIT,
			    pd->pd_sid, &cmops->em_encr_mech,
			    cmops->em_encr_key, NULL, NULL,
			    cmops->em_encr_templ);

			/* B_FALSE: synchronous (blocking) submission. */
			err = kcf_submit_request(pd, ctx, NULL, &encr_params,
			    B_FALSE);

			/* It can't be CRYPTO_QUEUED */
			if (err != CRYPTO_SUCCESS) {
				break;
			}

			/*
			 * Initialize the MAC half through the framework;
			 * it may be serviced by a different provider.
			 */
			err = crypto_mac_init(&cmops->em_mac_mech,
			    cmops->em_mac_key, cmops->em_mac_templ,
			    (crypto_context_t *)&mac_ctx, NULL);

			if (err == CRYPTO_SUCCESS) {
				/*
				 * Link the MAC context as the secondary
				 * context and take a reference so it
				 * outlives intermediate update calls.
				 */
				encr_kcf_ctx->kc_secondctx = (kcf_context_t *)
				    mac_ctx->cc_framework_private;
				KCF_CONTEXT_REFHOLD((kcf_context_t *)
				    mac_ctx->cc_framework_private);
			}

			break;

		}
		case KCF_OP_UPDATE: {
			crypto_dual_data_t *ct = cmops->em_ciphertext;
			crypto_data_t *pt = cmops->em_plaintext;
			kcf_context_t *mac_kcf_ctx = encr_kcf_ctx->kc_secondctx;
			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;

			/* Encrypt this chunk first. */
			KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_UPDATE,
			    pd->pd_sid, NULL, NULL, pt, (crypto_data_t *)ct,
			    NULL);

			err = kcf_submit_request(pd, ctx, NULL, &encr_params,
			    B_FALSE);

			/* It can't be CRYPTO_QUEUED */
			if (err != CRYPTO_SUCCESS) {
				break;
			}

			/*
			 * MAC only the ciphertext just produced. When
			 * dd_len2 != 0 the new output lives in the
			 * secondary window, so point the primary window
			 * at it for the duration of the MAC update.
			 */
			save_offset = ct->dd_offset1;
			save_len = ct->dd_len1;
			if (ct->dd_len2 == 0) {
				/*
				 * The previous encrypt step was an
				 * accumulation only and didn't produce any
				 * partial output
				 */
				if (ct->dd_len1 == 0)
					break;

			} else {
				ct->dd_offset1 = ct->dd_offset2;
				ct->dd_len1 = ct->dd_len2;
			}
			err = crypto_mac_update((crypto_context_t)mac_ctx,
			    (crypto_data_t *)ct, NULL);

			/* Restore the caller-visible primary window. */
			ct->dd_offset1 = save_offset;
			ct->dd_len1 = save_len;

			break;
		}
		case KCF_OP_FINAL: {
			crypto_dual_data_t *ct = cmops->em_ciphertext;
			crypto_data_t *mac = cmops->em_mac;
			kcf_context_t *mac_kcf_ctx = encr_kcf_ctx->kc_secondctx;
			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
			crypto_context_t mac_context = mac_ctx;

			/* Finish the encryption; may emit final ciphertext. */
			KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_FINAL,
			    pd->pd_sid, NULL, NULL, NULL, (crypto_data_t *)ct,
			    NULL);

			err = kcf_submit_request(pd, ctx, NULL, &encr_params,
			    B_FALSE);

			/* It can't be CRYPTO_QUEUED */
			if (err != CRYPTO_SUCCESS) {
				/* Abort the pending MAC context. */
				crypto_cancel_ctx(mac_context);
				break;
			}

			/* MAC any final ciphertext (secondary window). */
			if (ct->dd_len2 > 0) {
				save_offset = ct->dd_offset1;
				save_len = ct->dd_len1;
				ct->dd_offset1 = ct->dd_offset2;
				ct->dd_len1 = ct->dd_len2;

				err = crypto_mac_update(mac_context,
				    (crypto_data_t *)ct, NULL);

				ct->dd_offset1 = save_offset;
				ct->dd_len1 = save_len;

				if (err != CRYPTO_SUCCESS) {
					crypto_cancel_ctx(mac_context);
					/*
					 * NOTE(review): early return skips
					 * the KCF_PROV_INCRSTATS() below,
					 * unlike the other error paths in
					 * this case — confirm intended.
					 */
					return (err);
				}
			}

			/* and finally, collect the MAC */
			err = crypto_mac_final(mac_context, mac, NULL);
			break;
		}

		default:
			break;
		}
		/* Account the encrypt side's outcome against provider pd. */
		KCF_PROV_INCRSTATS(pd, err);
		break;
	}
	case KCF_OG_MAC_DECRYPT: {
		/* MAC-then-decrypt: emulate with a MAC step + a decrypt step */
		kcf_mac_decrypt_ops_params_t *mdops =
		    &params->rp_u.mac_decrypt_params;
		kcf_context_t *decr_kcf_ctx;
		crypto_ctx_t *mac_ctx;
		kcf_req_params_t decr_params;

		decr_kcf_ctx = (kcf_context_t *)(ctx->cc_framework_private);

		switch (optype) {
		case KCF_OP_INIT: {
			decr_kcf_ctx->kc_secondctx = NULL;

			/* MAC context first; framework picks the provider. */
			err = crypto_mac_init(&mdops->md_mac_mech,
			    mdops->md_mac_key, mdops->md_mac_templ,
			    (crypto_context_t *)&mac_ctx, NULL);

			/* It can't be CRYPTO_QUEUED */
			if (err != CRYPTO_SUCCESS) {
				break;
			}

			/* Then the decryption half on this provider. */
			KCF_WRAP_DECRYPT_OPS_PARAMS(&decr_params, KCF_OP_INIT,
			    pd->pd_sid, &mdops->md_decr_mech,
			    mdops->md_decr_key, NULL, NULL,
			    mdops->md_decr_templ);

			err = kcf_submit_request(pd, ctx, NULL, &decr_params,
			    B_FALSE);

			/* It can't be CRYPTO_QUEUED */
			if (err != CRYPTO_SUCCESS) {
				/* Undo the MAC init on decrypt-init failure. */
				crypto_cancel_ctx((crypto_context_t)mac_ctx);
				break;
			}

			/*
			 * Both halves are live: attach the MAC context as
			 * the secondary context and hold a reference.
			 */
			decr_kcf_ctx->kc_secondctx = (kcf_context_t *)
			    mac_ctx->cc_framework_private;
			KCF_CONTEXT_REFHOLD((kcf_context_t *)
			    mac_ctx->cc_framework_private);

			break;

		}
		case KCF_OP_UPDATE: {
			crypto_dual_data_t *ct = mdops->md_ciphertext;
			crypto_data_t *pt = mdops->md_plaintext;
			kcf_context_t *mac_kcf_ctx = decr_kcf_ctx->kc_secondctx;
			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;

			/* MAC covers the whole primary ciphertext window. */
			err = crypto_mac_update((crypto_context_t)mac_ctx,
			    (crypto_data_t *)ct, NULL);

			if (err != CRYPTO_SUCCESS)
				break;

			save_offset = ct->dd_offset1;
			save_len = ct->dd_len1;

			/* zero ct->dd_len2 means decrypt everything */
			if (ct->dd_len2 > 0) {
				/* Decrypt only the secondary window. */
				ct->dd_offset1 = ct->dd_offset2;
				ct->dd_len1 = ct->dd_len2;
			}

			err = crypto_decrypt_update((crypto_context_t)ctx,
			    (crypto_data_t *)ct, pt, NULL);

			/* Restore the caller-visible primary window. */
			ct->dd_offset1 = save_offset;
			ct->dd_len1 = save_len;

			break;
		}
		case KCF_OP_FINAL: {
			crypto_data_t *pt = mdops->md_plaintext;
			crypto_data_t *mac = mdops->md_mac;
			kcf_context_t *mac_kcf_ctx = decr_kcf_ctx->kc_secondctx;
			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;

			/* Collect the MAC before releasing any plaintext. */
			err = crypto_mac_final((crypto_context_t)mac_ctx,
			    mac, NULL);

			if (err != CRYPTO_SUCCESS) {
				/*
				 * NOTE(review): ctx is passed without the
				 * (crypto_context_t) cast used elsewhere in
				 * this function — presumably equivalent;
				 * confirm crypto_cancel_ctx's argument type.
				 */
				crypto_cancel_ctx(ctx);
				break;
			}

			/* Get the last chunk of plaintext */
			/*
			 * Extra hold: crypto_decrypt_final releases a
			 * context reference on completion — TODO confirm
			 * against crypto_decrypt_final's contract.
			 */
			KCF_CONTEXT_REFHOLD(decr_kcf_ctx);
			err = crypto_decrypt_final((crypto_context_t)ctx, pt,
			    NULL);

			break;
		}
		}
		break;
	}
	default:

		break;
	}		/* end of switch(params->rp_opgrp) */

	return (err);
}