Lines Matching defs:lock

432  * gets q lock (via ipc_lookup), releases before return.
444 kmutex_t *lock;
473 if ((lock = ipc_lookup(msq_svc, msgid, (kipc_perm_t **)&qp)) == NULL)
480 mutex_exit(lock);
485 mutex_exit(lock);
494 mutex_exit(lock);
523 mutex_exit(lock);
529 mutex_exit(lock);
554 mutex_exit(lock);
558 mutex_exit(lock);
596 kmutex_t *lock;
602 if (error = ipc_get(msq_svc, key, msgflg, (kipc_perm_t **)&qp, &lock))
606 mutex_exit(lock);
654 lock = ipc_commit_end(msq_svc, &qp->msg_perm);
661 mutex_exit(lock);
670 kmutex_t *lock;
680 if ((lock = ipc_lookup(msq_svc, msqid, (kipc_perm_t **)&qp)) == NULL) {
710 error = msg_copyout(qp, msgtyp, &lock, &xtsz, msgsz,
739 &msg_entry, &lock, qp);
763 &msg_entry, &lock, qp);
767 &msg_entry, &lock, qp);
804 msg_copyout(kmsqid_t *qp, long msgtyp, kmutex_t **lock, size_t *xtsz_ret,
836 mutex_exit(*lock);
860 *lock = ipc_lock(msq_svc, qp->msg_perm.ipc_id);
972 kmutex_t *lock;
987 if ((lock = ipc_lookup(msq_svc, msqid, (kipc_perm_t **)&qp)) == NULL)
991 mutex_exit(lock);
1036 mutex_exit(lock);
1069 lock = ipc_lock(msq_svc, qp->msg_perm.ipc_id);
1074 mutex_exit(lock);
1097 kmutex_t *lock = NULL;
1131 * allocation now. This saves dropping the lock
1132 * and then reacquiring the lock.
1147 if ((lock = ipc_lookup(msq_svc, msqid, (kipc_perm_t **)&qp)) == NULL) {
1185 cvres = cv_wait_sig(&msg_entry.msgw_wake_cv, lock);
1186 lock = ipc_relock(msq_svc, qp->msg_perm.ipc_id, lock);
1199 mutex_exit(lock);
1208 lock = ipc_lock(msq_svc, qp->msg_perm.ipc_id);
1247 if (lock)
1248 ipc_rele(msq_svc, (kipc_perm_t *)qp); /* drops lock */
1404 msg_rcvq_sleep(list_t *queue, msgq_wakeup_t *entry, kmutex_t **lock,
1414 cvres = cv_wait_sig(&entry->msgw_wake_cv, *lock);
1415 *lock = ipc_relock(msq_svc, qp->msg_perm.ipc_id, *lock);
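
Read together, these matches trace one locking convention: ipc_lookup() returns the queue with its perm lock held (the comment at 432 spells it out), and every exit path gives that lock back with mutex_exit() before returning (480-558 and 991-1074, among others). A minimal sketch of that shape, assuming the illumos SysV IPC helpers seen above; msq_example_op() and its bare EINVAL return are illustrative, not code from the file (the real syscalls wrap errors in set_errno()):

static int
msq_example_op(int msqid)
{
	kmsqid_t	*qp;	/* from <sys/msg_impl.h> */
	kmutex_t	*lock;

	/* ipc_lookup() returns NULL on a bad id, else the queue's perm lock, held. */
	if ((lock = ipc_lookup(msq_svc, msqid, (kipc_perm_t **)&qp)) == NULL)
		return (EINVAL);

	/* ... inspect or update *qp while the lock is held ... */

	mutex_exit(lock);	/* release before return, as the comment at 432 says */
	return (0);
}

The one exception in the listing is the cleanup at 1247-1248, where ipc_rele() is noted in its own comment as dropping the lock itself.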
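
The ipc_get() matches at 596-661 (presumably the msgget() path) use a different entry point with the same contract: ipc_get() returns with the lock held through its last argument, and after a new queue is set up, ipc_commit_end() evidently hands the lock back as well (654 is followed by the mutex_exit() at 661). A hedged sketch of the existing-queue side only, since the new-queue initialization is not visible in the listing; msq_example_get() is illustrative:

static int
msq_example_get(key_t key, int msgflg)
{
	kmsqid_t	*qp;
	kmutex_t	*lock;
	int		error;

	/* On success, ipc_get() returns with qp's lock held via &lock. */
	if (error = ipc_get(msq_svc, key, msgflg, (kipc_perm_t **)&qp, &lock))
		return (error);

	/*
	 * Existing queue: nothing more to do here.  The new-queue path in
	 * the listing instead initializes the queue and takes its lock
	 * back from ipc_commit_end() before the final mutex_exit().
	 */
	mutex_exit(lock);
	return (0);
}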
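
The comment at 1131-1132 records a deliberate ordering on the receive side: do the allocation before taking the queue lock, so the lock never has to be dropped and reacquired around a blocking allocator. What exactly is allocated is not visible in the listing; the kmem_zalloc() of a wakeup entry below is purely illustrative of that ordering, and only the ipc_lookup()/mutex_exit() calls mirror the matches above:

static int
msq_example_rcv_setup(int msqid)
{
	msgq_wakeup_t	*entry;
	kmsqid_t	*qp;
	kmutex_t	*lock;

	/*
	 * Allocate while unlocked: a KM_SLEEP allocation may block, which
	 * would otherwise force dropping and reacquiring the queue lock.
	 */
	entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);

	if ((lock = ipc_lookup(msq_svc, msqid, (kipc_perm_t **)&qp)) == NULL) {
		kmem_free(entry, sizeof (*entry));
		return (EINVAL);
	}

	/* ... enqueue 'entry', sleep for a message, copy it out ... */

	mutex_exit(lock);
	return (0);
}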
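
Finally, the sleep side at 1185-1186 and 1404-1415: cv_wait_sig() drops the queue lock for the duration of the sleep, and the wakeup path goes back through ipc_relock() rather than a bare mutex_enter(), presumably because the mutex protecting a given IPC id can change while the thread is asleep (ipc_lock() plays the same role where only the id is in hand, as at 860, 1069 and 1208). A sketch of that wait loop under those assumptions; the msg_qnum test and the EINTR handling stand in for the real wakeup and message-matching logic:

static int
msq_example_wait(kmsqid_t *qp, msgq_wakeup_t *entry, kmutex_t **lock)
{
	int	cvres;

	/* Sleep until a sender posts a message or a signal arrives. */
	while (qp->msg_qnum == 0) {
		/* cv_wait_sig() releases *lock while asleep; 0 == interrupted. */
		cvres = cv_wait_sig(&entry->msgw_wake_cv, *lock);

		/*
		 * Reacquire through the IPC layer: the lock for this id may
		 * no longer be the one the thread went to sleep on.
		 */
		*lock = ipc_relock(msq_svc, qp->msg_perm.ipc_id, *lock);

		if (cvres == 0)
			return (EINTR);	/* interrupted; caller drops *lock */
	}
	return (0);
}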