1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 * 25 * Copyright 2020 Joyent, Inc. 26 */ 27 28 #include <md5.h> 29 #include <pthread.h> 30 #include <syslog.h> 31 #include <stdlib.h> 32 #include <string.h> 33 #include <strings.h> 34 #include <sys/sha1.h> 35 #include <security/cryptoki.h> 36 #include "softGlobal.h" 37 #include "softSession.h" 38 #include "softObject.h" 39 #include "softOps.h" 40 #include "softKeystore.h" 41 #include "softKeystoreUtil.h" 42 43 44 CK_ULONG soft_session_cnt = 0; /* the number of opened sessions */ 45 CK_ULONG soft_session_rw_cnt = 0; /* the number of opened R/W sessions */ 46 47 #define DIGEST_MECH_OK(_m_) ((_m_) == CKM_MD5 || (_m_) == CKM_SHA_1) 48 49 /* 50 * Delete all the sessions. First, obtain the global session 51 * list lock. Then start to delete one session at a time. 52 * Release the global session list lock before returning to 53 * caller. 54 */ 55 CK_RV 56 soft_delete_all_sessions(boolean_t force) 57 { 58 59 CK_RV rv = CKR_OK; 60 CK_RV rv1; 61 soft_session_t *session_p; 62 soft_session_t *session_p1; 63 64 /* Acquire the global session list lock */ 65 (void) pthread_mutex_lock(&soft_sessionlist_mutex); 66 67 session_p = soft_session_list; 68 69 /* Delete all the sessions in the session list */ 70 while (session_p) { 71 session_p1 = session_p->next; 72 73 /* 74 * Delete a session by calling soft_delete_session() 75 * with a session pointer and a boolean arguments. 76 * Boolean value TRUE is used to indicate that the 77 * caller holds the lock on the global session list. 78 * 79 */ 80 rv1 = soft_delete_session(session_p, force, B_TRUE); 81 82 /* Record the very first error code */ 83 if (rv == CKR_OK) { 84 rv = rv1; 85 } 86 87 session_p = session_p1; 88 } 89 90 /* No session left */ 91 soft_session_list = NULL; 92 93 /* Release the global session list lock */ 94 (void) pthread_mutex_unlock(&soft_sessionlist_mutex); 95 96 return (rv); 97 98 } 99 100 /* 101 * Create a new session struct, and add it to the session linked list. 102 * 103 * This function will acquire the global session list lock, and release 104 * it after adding the session to the session linked list. 105 */ 106 CK_RV 107 soft_add_session(CK_FLAGS flags, CK_VOID_PTR pApplication, 108 CK_NOTIFY notify, CK_ULONG *sessionhandle_p) 109 { 110 soft_session_t *new_sp = NULL; 111 112 /* Allocate a new session struct */ 113 new_sp = calloc(1, sizeof (soft_session_t)); 114 if (new_sp == NULL) { 115 return (CKR_HOST_MEMORY); 116 } 117 118 new_sp->magic_marker = SOFTTOKEN_SESSION_MAGIC; 119 new_sp->pApplication = pApplication; 120 new_sp->Notify = notify; 121 new_sp->flags = flags; 122 new_sp->state = CKS_RO_PUBLIC_SESSION; 123 new_sp->object_list = NULL; 124 new_sp->ses_refcnt = 0; 125 new_sp->ses_close_sync = 0; 126 127 (void) pthread_mutex_lock(&soft_giant_mutex); 128 if (soft_slot.authenticated) { 129 (void) pthread_mutex_unlock(&soft_giant_mutex); 130 if (flags & CKF_RW_SESSION) { 131 new_sp->state = CKS_RW_USER_FUNCTIONS; 132 } else { 133 new_sp->state = CKS_RO_USER_FUNCTIONS; 134 } 135 } else { 136 (void) pthread_mutex_unlock(&soft_giant_mutex); 137 if (flags & CKF_RW_SESSION) { 138 new_sp->state = CKS_RW_PUBLIC_SESSION; 139 } else { 140 new_sp->state = CKS_RO_PUBLIC_SESSION; 141 } 142 } 143 144 /* Initialize the lock for the newly created session */ 145 if (pthread_mutex_init(&new_sp->session_mutex, NULL) != 0) { 146 free(new_sp); 147 return (CKR_CANT_LOCK); 148 } 149 150 (void) pthread_cond_init(&new_sp->ses_free_cond, NULL); 151 152 /* Acquire the global session list lock */ 153 (void) pthread_mutex_lock(&soft_sessionlist_mutex); 154 155 /* Generate a unique session handle. */ 156 do { 157 arc4random_buf(&new_sp->handle, sizeof (new_sp->handle)); 158 if (new_sp->handle == CK_INVALID_HANDLE) 159 continue; 160 } while (avl_find(&soft_session_tree, new_sp, NULL) != NULL); 161 162 avl_add(&soft_session_tree, new_sp); 163 *sessionhandle_p = new_sp->handle; 164 165 /* Insert the new session in front of session list */ 166 if (soft_session_list == NULL) { 167 soft_session_list = new_sp; 168 new_sp->next = NULL; 169 new_sp->prev = NULL; 170 } else { 171 soft_session_list->prev = new_sp; 172 new_sp->next = soft_session_list; 173 new_sp->prev = NULL; 174 soft_session_list = new_sp; 175 } 176 177 ++soft_session_cnt; 178 if (flags & CKF_RW_SESSION) 179 ++soft_session_rw_cnt; 180 181 if (soft_session_cnt == 1) 182 /* 183 * This is the first session to be opened, so we can set 184 * validate the public token objects in token list now. 185 */ 186 soft_validate_token_objects(B_TRUE); 187 188 /* Release the global session list lock */ 189 (void) pthread_mutex_unlock(&soft_sessionlist_mutex); 190 191 return (CKR_OK); 192 193 } 194 195 /* 196 * This function adds the to-be-freed session to a linked list. 197 * When the number of sessions queued in the linked list reaches the 198 * maximum threshold MAX_SES_TO_BE_FREED, it will free the first 199 * session (FIFO) in the list. 200 */ 201 void 202 session_delay_free(soft_session_t *sp) 203 { 204 soft_session_t *tmp; 205 206 (void) pthread_mutex_lock(&ses_delay_freed.ses_to_be_free_mutex); 207 208 /* Add the newly deleted session at the end of the list */ 209 sp->next = NULL; 210 if (ses_delay_freed.first == NULL) { 211 ses_delay_freed.last = sp; 212 ses_delay_freed.first = sp; 213 } else { 214 ses_delay_freed.last->next = sp; 215 ses_delay_freed.last = sp; 216 } 217 218 if (++ses_delay_freed.count >= MAX_SES_TO_BE_FREED) { 219 /* 220 * Free the first session in the list only if 221 * the total count reaches maximum threshold. 222 */ 223 ses_delay_freed.count--; 224 tmp = ses_delay_freed.first->next; 225 free(ses_delay_freed.first); 226 ses_delay_freed.first = tmp; 227 } 228 (void) pthread_mutex_unlock(&ses_delay_freed.ses_to_be_free_mutex); 229 } 230 231 /* 232 * Delete a session: 233 * - Remove the session from the session linked list. 234 * Holding the lock on the global session list is needed to do this. 235 * - Release all the objects created by the session. 236 * 237 * The boolean argument lock_held is used to indicate that whether 238 * the caller of this function holds the lock on the global session 239 * list or not. 240 * - When called by soft_delete_all_sessions(), which is called by 241 * C_Finalize() or C_CloseAllSessions() -- the lock_held = TRUE. 242 * - When called by C_CloseSession() -- the lock_held = FALSE. 243 * 244 * When the caller does not hold the lock on the global session 245 * list, this function will acquire that lock in order to proceed, 246 * and also release that lock before returning to caller. 247 */ 248 CK_RV 249 soft_delete_session(soft_session_t *session_p, 250 boolean_t force, boolean_t lock_held) 251 { 252 253 /* 254 * Check to see if the caller holds the lock on the global 255 * session list. If not, we need to acquire that lock in 256 * order to proceed. 257 */ 258 if (!lock_held) { 259 /* Acquire the global session list lock */ 260 (void) pthread_mutex_lock(&soft_sessionlist_mutex); 261 } 262 263 /* 264 * Remove the session from the session linked list first. 265 */ 266 if (soft_session_list == session_p) { 267 /* Session is the first one in the list */ 268 if (session_p->next) { 269 soft_session_list = session_p->next; 270 session_p->next->prev = NULL; 271 } else { 272 /* Session is the only one in the list */ 273 soft_session_list = NULL; 274 } 275 } else { 276 /* Session is not the first one in the list */ 277 if (session_p->next) { 278 /* Session is in the middle of the list */ 279 session_p->prev->next = session_p->next; 280 session_p->next->prev = session_p->prev; 281 } else { 282 /* Session is the last one in the list */ 283 session_p->prev->next = NULL; 284 } 285 } 286 287 avl_remove(&soft_session_tree, session_p); 288 289 --soft_session_cnt; 290 if (session_p->flags & CKF_RW_SESSION) 291 --soft_session_rw_cnt; 292 293 if (!lock_held) { 294 /* 295 * If the global session list lock is obtained by 296 * this function, then release that lock after 297 * removing the session from session linked list. 298 * We want the releasing of the objects of the 299 * session, and freeing of the session itself to 300 * be done without holding the global session list 301 * lock. 302 */ 303 (void) pthread_mutex_unlock(&soft_sessionlist_mutex); 304 } 305 306 307 /* Acquire the individual session lock */ 308 (void) pthread_mutex_lock(&session_p->session_mutex); 309 /* 310 * Make sure another thread hasn't freed the session. 311 */ 312 if (session_p->magic_marker != SOFTTOKEN_SESSION_MAGIC) { 313 (void) pthread_mutex_unlock(&session_p->session_mutex); 314 return (CKR_OK); 315 } 316 317 /* 318 * The deletion of a session must be blocked when the session 319 * reference count is not zero. This means if any session related 320 * operation starts prior to the session close operation gets in, 321 * the session closing thread must wait for the non-closing 322 * operation to be completed before it can proceed the close 323 * operation. 324 * 325 * Unless we are being forced to shut everything down, this only 326 * happens if the libraries _fini() is running not of someone 327 * explicitly called C_Finalize(). 328 */ 329 if (force) 330 session_p->ses_refcnt = 0; 331 332 while (session_p->ses_refcnt != 0) { 333 /* 334 * We set the SESSION_REFCNT_WAITING flag before we put 335 * this closing thread in a wait state, so other non-closing 336 * operation thread will signal to wake it up only when 337 * the session reference count becomes zero and this flag 338 * is set. 339 */ 340 session_p->ses_close_sync |= SESSION_REFCNT_WAITING; 341 (void) pthread_cond_wait(&session_p->ses_free_cond, 342 &session_p->session_mutex); 343 } 344 345 session_p->ses_close_sync &= ~SESSION_REFCNT_WAITING; 346 347 /* 348 * Remove all the objects created in this session. 349 */ 350 soft_delete_all_objects_in_session(session_p, force); 351 352 /* 353 * Mark session as no longer valid. This can only be done after all 354 * objects created by this session are free'd since the marker is 355 * still needed in the process of removing objects from the session. 356 */ 357 session_p->magic_marker = 0; 358 359 (void) pthread_cond_destroy(&session_p->ses_free_cond); 360 361 /* In case application did not call Final */ 362 if (session_p->digest.context != NULL) 363 free(session_p->digest.context); 364 365 if (session_p->encrypt.context != NULL) 366 /* 367 * 1st B_TRUE: encrypt 368 * 2nd B_TRUE: caller is holding session_mutex. 369 */ 370 soft_crypt_cleanup(session_p, B_TRUE, B_TRUE); 371 372 if (session_p->decrypt.context != NULL) 373 /* 374 * 1st B_FALSE: decrypt 375 * 2nd B_TRUE: caller is holding session_mutex. 376 */ 377 soft_crypt_cleanup(session_p, B_FALSE, B_TRUE); 378 379 if (session_p->sign.context != NULL) 380 free(session_p->sign.context); 381 382 if (session_p->verify.context != NULL) 383 free(session_p->verify.context); 384 385 if (session_p->find_objects.context != NULL) { 386 find_context_t *fcontext; 387 fcontext = (find_context_t *)session_p->find_objects.context; 388 free(fcontext->objs_found); 389 free(fcontext); 390 } 391 392 /* Reset SESSION_IS_CLOSING flag. */ 393 session_p->ses_close_sync &= ~SESSION_IS_CLOSING; 394 395 (void) pthread_mutex_unlock(&session_p->session_mutex); 396 /* Destroy the individual session lock */ 397 (void) pthread_mutex_destroy(&session_p->session_mutex); 398 399 /* Delay freeing the session */ 400 session_delay_free(session_p); 401 402 return (CKR_OK); 403 } 404 405 406 /* 407 * This function is used to type cast a session handle to a pointer to 408 * the session struct. Also, it does the following things: 409 * 1) Check to see if the session struct is tagged with a session 410 * magic number. This is to detect when an application passes 411 * a bogus session pointer. 412 * 2) Acquire the lock on the designated session. 413 * 3) Check to see if the session is in the closing state that another 414 * thread is performing. 415 * 4) Increment the session reference count by one. This is to prevent 416 * this session from being closed by other thread. 417 * 5) Release the lock held on the designated session. 418 */ 419 CK_RV 420 handle2session(CK_SESSION_HANDLE hSession, soft_session_t **session_p) 421 { 422 423 soft_session_t *sp; 424 soft_session_t node; 425 426 /* 427 * No need to hold soft_sessionlist_mutex as we are 428 * just reading the value and 32-bit reads are atomic. 429 */ 430 if (all_sessions_closing) { 431 return (CKR_SESSION_CLOSED); 432 } 433 434 (void) memset(&node, 0, sizeof (node)); 435 node.handle = hSession; 436 437 (void) pthread_mutex_lock(&soft_sessionlist_mutex); 438 439 sp = avl_find(&soft_session_tree, &node, NULL); 440 if ((sp == NULL) || 441 (sp->magic_marker != SOFTTOKEN_SESSION_MAGIC)) { 442 (void) pthread_mutex_unlock(&soft_sessionlist_mutex); 443 return (CKR_SESSION_HANDLE_INVALID); 444 } 445 (void) pthread_mutex_lock(&sp->session_mutex); 446 (void) pthread_mutex_unlock(&soft_sessionlist_mutex); 447 448 if (sp->ses_close_sync & SESSION_IS_CLOSING) { 449 (void) pthread_mutex_unlock(&sp->session_mutex); 450 return (CKR_SESSION_CLOSED); 451 } 452 453 /* Increment session ref count. */ 454 sp->ses_refcnt++; 455 456 (void) pthread_mutex_unlock(&sp->session_mutex); 457 458 *session_p = sp; 459 460 return (CKR_OK); 461 } 462 463 /* 464 * The format to be saved in the pOperationState will be: 465 * 1. internal_op_state_t 466 * 2. crypto_active_op_t 467 * 3. actual context of the active operation 468 */ 469 CK_RV 470 soft_get_operationstate(soft_session_t *session_p, CK_BYTE_PTR pOperationState, 471 CK_ULONG_PTR pulOperationStateLen) 472 { 473 474 internal_op_state_t *p_op_state; 475 CK_ULONG op_data_len = 0; 476 CK_RV rv = CKR_OK; 477 478 if (pulOperationStateLen == NULL) 479 return (CKR_ARGUMENTS_BAD); 480 481 (void) pthread_mutex_lock(&session_p->session_mutex); 482 483 /* Check to see if encrypt operation is active. */ 484 if (session_p->encrypt.flags & CRYPTO_OPERATION_ACTIVE) { 485 rv = CKR_STATE_UNSAVEABLE; 486 goto unlock_session; 487 } 488 489 /* Check to see if decrypt operation is active. */ 490 if (session_p->decrypt.flags & CRYPTO_OPERATION_ACTIVE) { 491 rv = CKR_STATE_UNSAVEABLE; 492 goto unlock_session; 493 } 494 495 /* Check to see if sign operation is active. */ 496 if (session_p->sign.flags & CRYPTO_OPERATION_ACTIVE) { 497 rv = CKR_STATE_UNSAVEABLE; 498 goto unlock_session; 499 } 500 501 /* Check to see if verify operation is active. */ 502 if (session_p->verify.flags & CRYPTO_OPERATION_ACTIVE) { 503 rv = CKR_STATE_UNSAVEABLE; 504 goto unlock_session; 505 } 506 507 /* Check to see if digest operation is active. */ 508 if (session_p->digest.flags & CRYPTO_OPERATION_ACTIVE) { 509 op_data_len = sizeof (internal_op_state_t) + 510 sizeof (crypto_active_op_t); 511 512 switch (session_p->digest.mech.mechanism) { 513 case CKM_MD5: 514 op_data_len += sizeof (MD5_CTX); 515 break; 516 case CKM_SHA_1: 517 op_data_len += sizeof (SHA1_CTX); 518 break; 519 default: 520 rv = CKR_STATE_UNSAVEABLE; 521 goto unlock_session; 522 } 523 524 if (pOperationState == NULL_PTR) { 525 *pulOperationStateLen = op_data_len; 526 goto unlock_session; 527 } else { 528 if (*pulOperationStateLen < op_data_len) { 529 *pulOperationStateLen = op_data_len; 530 rv = CKR_BUFFER_TOO_SMALL; 531 goto unlock_session; 532 } 533 } 534 535 /* Save internal_op_state_t */ 536 /* LINTED E_BAD_PTR_CAST_ALIGN */ 537 p_op_state = (internal_op_state_t *)pOperationState; 538 p_op_state->op_len = op_data_len; 539 p_op_state->op_active = DIGEST_OP; 540 p_op_state->op_session_state = session_p->state; 541 542 /* Save crypto_active_op_t */ 543 (void) memcpy((CK_BYTE *)pOperationState + 544 sizeof (internal_op_state_t), 545 &session_p->digest, 546 sizeof (crypto_active_op_t)); 547 548 switch (session_p->digest.mech.mechanism) { 549 case CKM_MD5: 550 /* Save MD5_CTX for the active digest operation */ 551 (void) memcpy((CK_BYTE *)pOperationState + 552 sizeof (internal_op_state_t) + 553 sizeof (crypto_active_op_t), 554 session_p->digest.context, 555 sizeof (MD5_CTX)); 556 break; 557 558 case CKM_SHA_1: 559 /* Save SHA1_CTX for the active digest operation */ 560 (void) memcpy((CK_BYTE *)pOperationState + 561 sizeof (internal_op_state_t) + 562 sizeof (crypto_active_op_t), 563 session_p->digest.context, 564 sizeof (SHA1_CTX)); 565 break; 566 567 default: 568 rv = CKR_STATE_UNSAVEABLE; 569 } 570 } else { 571 rv = CKR_OPERATION_NOT_INITIALIZED; 572 goto unlock_session; 573 } 574 575 *pulOperationStateLen = op_data_len; 576 577 unlock_session: 578 (void) pthread_mutex_unlock(&session_p->session_mutex); 579 580 return (rv); 581 582 } 583 584 static CK_BYTE_PTR alloc_digest(CK_ULONG mech) 585 { 586 CK_BYTE_PTR ret_val; 587 588 switch (mech) { 589 case CKM_MD5: 590 ret_val = (CK_BYTE_PTR) malloc(sizeof (MD5_CTX)); 591 break; 592 case CKM_SHA_1: 593 ret_val = (CK_BYTE_PTR) malloc(sizeof (SHA1_CTX)); 594 break; 595 default: ret_val = NULL; 596 } 597 598 return (ret_val); 599 } 600 601 /* 602 * The format to be restored from the pOperationState will be: 603 * 1. internal_op_state_t 604 * 2. crypto_active_op_t 605 * 3. actual context of the saved operation 606 */ 607 CK_RV 608 soft_set_operationstate(soft_session_t *session_p, CK_BYTE_PTR pOperationState, 609 CK_ULONG ulOperationStateLen, CK_OBJECT_HANDLE hEncryptionKey, 610 CK_OBJECT_HANDLE hAuthenticationKey) 611 { 612 613 CK_RV rv = CKR_OK; 614 internal_op_state_t *p_op_state; 615 crypto_active_op_t *p_active_op; 616 CK_ULONG offset = 0; 617 CK_ULONG mech; 618 void *free_it = NULL; 619 620 /* LINTED E_BAD_PTR_CAST_ALIGN */ 621 p_op_state = (internal_op_state_t *)pOperationState; 622 623 if (p_op_state->op_len != ulOperationStateLen) { 624 /* 625 * The supplied data length does not match with 626 * the saved data length. 627 */ 628 return (CKR_SAVED_STATE_INVALID); 629 } 630 631 if (p_op_state->op_active != DIGEST_OP) 632 return (CKR_SAVED_STATE_INVALID); 633 634 if ((hAuthenticationKey != 0) || (hEncryptionKey != 0)) { 635 return (CKR_KEY_NOT_NEEDED); 636 } 637 638 offset = sizeof (internal_op_state_t); 639 /* LINTED E_BAD_PTR_CAST_ALIGN */ 640 p_active_op = (crypto_active_op_t *)(pOperationState + offset); 641 offset += sizeof (crypto_active_op_t); 642 mech = p_active_op->mech.mechanism; 643 644 if (!DIGEST_MECH_OK(mech)) { 645 return (CKR_SAVED_STATE_INVALID); 646 } 647 648 /* 649 * We may reuse digest.context in case the digest mechanisms (the one, 650 * which belongs to session and the operation, which we are restoring) 651 * are the same. If digest mechanisms are different, we have to release 652 * the digest context, which belongs to session and allocate a new one. 653 */ 654 (void) pthread_mutex_lock(&session_p->session_mutex); 655 656 if (session_p->state != p_op_state->op_session_state) { 657 /* 658 * The supplied session state does not match with 659 * the saved session state. 660 */ 661 rv = CKR_SAVED_STATE_INVALID; 662 goto unlock_session; 663 } 664 665 if (session_p->digest.context && 666 (session_p->digest.mech.mechanism != mech)) { 667 free_it = session_p->digest.context; 668 session_p->digest.context = NULL; 669 } 670 671 if (session_p->digest.context == NULL) { 672 session_p->digest.context = alloc_digest(mech); 673 674 if (session_p->digest.context == NULL) { 675 /* 676 * put back original context into session in case 677 * allocation of new context has failed. 678 */ 679 session_p->digest.context = free_it; 680 free_it = NULL; 681 rv = CKR_HOST_MEMORY; 682 goto unlock_session; 683 } 684 } 685 686 /* Restore crypto_active_op_t */ 687 session_p->digest.mech.mechanism = mech; 688 session_p->digest.flags = p_active_op->flags; 689 690 switch (mech) { 691 case CKM_MD5: 692 /* Restore MD5_CTX from the saved digest operation */ 693 (void) memcpy((CK_BYTE *)session_p->digest.context, 694 (CK_BYTE *)pOperationState + offset, 695 sizeof (MD5_CTX)); 696 break; 697 case CKM_SHA_1: 698 /* Restore SHA1_CTX from the saved digest operation */ 699 (void) memcpy((CK_BYTE *)session_p->digest.context, 700 (CK_BYTE *)pOperationState + offset, 701 sizeof (SHA1_CTX)); 702 break; 703 default: 704 /* never reached */ 705 rv = CKR_SAVED_STATE_INVALID; 706 } 707 708 unlock_session: 709 (void) pthread_mutex_unlock(&session_p->session_mutex); 710 711 if (free_it != NULL) 712 free(free_it); 713 714 return (rv); 715 } 716 717 718 CK_RV 719 soft_login(CK_UTF8CHAR_PTR pPin, CK_ULONG ulPinLen) 720 { 721 722 /* 723 * Authenticate the input PIN. 724 */ 725 return (soft_verify_pin(pPin, ulPinLen)); 726 727 } 728 729 void 730 soft_logout(void) 731 { 732 733 /* 734 * Delete all the private token objects from the "token_object_list". 735 */ 736 soft_delete_all_in_core_token_objects(PRIVATE_TOKEN); 737 return; 738 739 } 740 741 void 742 soft_acquire_all_session_mutexes(soft_session_t *session_p) 743 { 744 /* Iterate through sessions acquiring all mutexes */ 745 while (session_p) { 746 soft_object_t *object_p; 747 748 (void) pthread_mutex_lock(&session_p->session_mutex); 749 object_p = session_p->object_list; 750 751 /* Lock also all objects related to session */ 752 while (object_p) { 753 (void) pthread_mutex_lock(&object_p->object_mutex); 754 object_p = object_p->next; 755 } 756 session_p = session_p->next; 757 } 758 } 759 760 void 761 soft_release_all_session_mutexes(soft_session_t *session_p) 762 { 763 /* Iterate through sessions releasing all mutexes */ 764 while (session_p) { 765 /* 766 * N.B. Ideally, should go in opposite order to guarantee 767 * lock-order requirements but there is no tail pointer. 768 */ 769 soft_object_t *object_p = session_p->object_list; 770 771 /* Unlock also all objects related to session */ 772 while (object_p) { 773 (void) pthread_mutex_unlock(&object_p->object_mutex); 774 object_p = object_p->next; 775 } 776 (void) pthread_mutex_unlock(&session_p->session_mutex); 777 session_p = session_p->next; 778 } 779 } 780