
Searched refs:lock (Results 1 – 25 of 692) sorted by relevance


/titanic_51/usr/src/uts/common/sys/
flock_impl.h
56 struct lock_descriptor *from_vertex; /* edge emanating from lock */
57 struct lock_descriptor *to_vertex; /* edge pointing to lock */
63 struct lock_descriptor *l_next; /* next active/sleep lock */
64 struct lock_descriptor *l_prev; /* previous active/sleep lock */
73 int l_type; /* type of lock */
79 kcondvar_t l_cv; /* wait condition for lock */
106 * The possible states a lock can be in. These states are stored in the
155 * The LLM design has been modified so that lock states are now stored
160 * to the implementation of the lock manager and should not be used
166 #define IO_LOCK 0x0004 /* is an IO lock */
229 SET_LOCK_TO_FIRST_ACTIVE_VP(gp,lock,vp) global() argument
234 SET_LOCK_TO_FIRST_SLEEP_VP(gp,lock,vp) global() argument
248 IS_INITIAL(lock) global() argument
249 IS_ACTIVE(lock) global() argument
250 IS_SLEEPING(lock) global() argument
251 IS_GRANTED(lock) global() argument
252 IS_INTERRUPTED(lock) global() argument
253 IS_CANCELLED(lock) global() argument
254 IS_DEAD(lock) global() argument
256 IS_QUERY_LOCK(lock) global() argument
257 IS_RECOMPUTE(lock) global() argument
258 IS_BARRIER(lock) global() argument
259 IS_DELETED(lock) global() argument
260 IS_REFERENCED(lock) global() argument
261 IS_IO_LOCK(lock) global() argument
262 IS_WILLING_TO_SLEEP(lock) global() argument
264 IS_LOCKMGR(lock) global() argument
265 IS_NLM_UP(lock) global() argument
267 IS_PXFS(lock) global() argument
278 IS_LOCAL(lock) global() argument
279 IS_REMOTE(lock) global() argument
318 NOT_BLOCKED(lock) global() argument
321 GRANT_WAKEUP(lock) global() argument
334 CANCEL_WAKEUP(lock) global() argument
347 INTERRUPT_WAKEUP(lock) global() argument
360 REMOVE_SLEEP_QUEUE(lock) global() argument
370 NO_DEPENDENTS(lock) global() argument
373 GRANT(lock) global() argument
379 FIRST_IN(lock) global() argument
380 FIRST_ADJ(lock) global() argument
381 HEAD(lock) global() argument
384 IN_ADJ_INIT(lock) global() argument
407 SET_NLM_STATE(lock,nlm_state) global() argument
408 GET_NLM_STATE(lock) global() argument
479 PROC_SAME_OWNER(lock,pvertex) global() argument
[all...]
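
The flock_impl.h results above list a family of state-test macros (IS_ACTIVE, IS_SLEEPING, IS_GRANTED, and so on) applied to a lock descriptor. As a rough illustration of that pattern only, with hypothetical flag names and a pared-down descriptor rather than the real illumos definitions, such macros typically test bits in the descriptor's state field:

    /*
     * Illustrative sketch only: hypothetical state flags and a minimal
     * descriptor; the real flock_impl.h definitions differ.
     */
    #include <stdio.h>

    #define FLK_ACTIVE_STATE    0x0002  /* lock is on the active queue */
    #define FLK_SLEEPING_STATE  0x0004  /* lock is blocked, waiting */
    #define FLK_GRANTED_STATE   0x0008  /* sleeping lock has been granted */

    typedef struct lock_descriptor {
        int l_status;                   /* current state bits */
        int l_type;                     /* type of lock, e.g. read or write */
    } lock_descriptor_t;

    /* State tests in the style of IS_ACTIVE(lock), IS_SLEEPING(lock), ... */
    #define IS_ACTIVE(lock)    (((lock)->l_status & FLK_ACTIVE_STATE) != 0)
    #define IS_SLEEPING(lock)  (((lock)->l_status & FLK_SLEEPING_STATE) != 0)
    #define IS_GRANTED(lock)   (((lock)->l_status & FLK_GRANTED_STATE) != 0)

    int
    main(void)
    {
        lock_descriptor_t ld = { FLK_SLEEPING_STATE, 0 };

        (void) printf("active=%d sleeping=%d\n",
            IS_ACTIVE(&ld), IS_SLEEPING(&ld));
        return (0);
    }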
/titanic_51/usr/src/uts/common/fs/smbsrv/
smb_lock.c
27 * This module provides range lock functionality for CIFS/SMB clients.
28 * Lock range service functions process SMB lock and unlock
29 * requests for a file by applying lock rules and marks file range
30 * as locked if the lock is successful otherwise return proper
61 smb_lock_t *lock; in smb_lock_get_lock_count() local
71 for (lock = smb_llist_head(llist); in smb_lock_get_lock_count()
72 lock != NULL; in smb_lock_get_lock_count()
73 lock = smb_llist_next(llist, lock)) { in smb_lock_get_lock_count()
74 if (lock in smb_lock_get_lock_count()
97 smb_lock_t *lock = NULL; smb_unlock_range() local
148 smb_lock_t *lock; smb_lock_range() local
277 smb_lock_t *lock; smb_lock_range_access() local
310 smb_lock_t *lock; smb_node_destroy_lock_by_ofile() local
424 smb_lock_posix_unlock(smb_node_t * node,smb_lock_t * lock,cred_t * cr) smb_lock_posix_unlock() argument
485 smb_lock_range_overlap(struct smb_lock * lock,uint64_t start,uint64_t length) smb_lock_range_overlap() argument
533 smb_lock_t *lock; smb_lock_range_lckrules() local
702 smb_lock_t *lock; smb_lock_range_ulckrules() local
733 smb_lock_t *lock; smb_lock_create() local
766 smb_lock_free(smb_lock_t * lock) smb_lock_free() argument
781 smb_lock_destroy(smb_lock_t * lock) smb_lock_destroy() argument
[all...]
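
The smb_lock.c results include a byte-range overlap helper, smb_lock_range_overlap(lock, start, length). A sketch of that kind of check follows; the field names and the zero-length convention are assumptions for illustration, not the actual smbsrv code.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for smb_lock_t; only the range fields matter here. */
    typedef struct smb_lock {
        uint64_t l_start;               /* first byte of the locked range */
        uint64_t l_length;              /* number of bytes locked */
    } smb_lock_t;

    /*
     * Return nonzero if [start, start + length) overlaps the lock's range.
     * Assumption for this sketch: a zero-length range never overlaps anything.
     */
    static int
    lock_range_overlap(const smb_lock_t *lock, uint64_t start, uint64_t length)
    {
        if (length == 0 || lock->l_length == 0)
            return (0);
        return (start < lock->l_start + lock->l_length &&
            lock->l_start < start + length);
    }

    int
    main(void)
    {
        smb_lock_t held = { 100, 50 };  /* bytes 100..149 are locked */

        (void) printf("%d\n", lock_range_overlap(&held, 140, 20));  /* 1 */
        (void) printf("%d\n", lock_range_overlap(&held, 150, 20));  /* 0 */
        return (0);
    }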
/titanic_51/usr/src/uts/common/os/
flock.c
74 cmn_err(CE_PANIC, "Illegal lock transition \
111 * lock: mutex]
136 static kmutex_t nlm_reg_lock; /* lock to protect array */
140 * Although we need a global lock dependency graph (and associated data
141 * structures), we also need a per-zone notion of whether the lock manager is
142 * running, and so whether to allow lock manager requests or not.
145 * (flk_lockmgr_status), protected by flock_lock, and set when the lock
151 * The per-graph copies are used to synchronize lock requests with shutdown
164 static void flk_free_lock(lock_descriptor_t *lock);
238 * KLM module not loaded; lock manage in flk_get_lockmgr_status()
461 lock_descriptor_t *fplock, *lock, *nlock; ofdcleanlock() local
1010 flk_free_lock(lock_descriptor_t * lock) flk_free_lock() argument
1028 flk_set_state(lock_descriptor_t * lock,int new_state) flk_set_state() argument
1083 lock_descriptor_t *lock; flk_process_request() local
1305 lock_descriptor_t *lock, *lock1; flk_execute_request() local
1613 flk_relation(lock_descriptor_t * lock,lock_descriptor_t * request) flk_relation() argument
1845 lock_descriptor_t *first_lock, *lock; flk_insert_active_lock() local
1882 flk_delete_active_lock(lock_descriptor_t * lock,int free_lock) flk_delete_active_lock() argument
1921 lock_descriptor_t *lock; flk_insert_sleeping_lock() local
1952 lock_descriptor_t *vertex, *lock; flk_cancel_sleeping_lock() local
2090 lock_descriptor_t *lock; flk_graph_uncolor() local
2111 flk_wakeup(lock_descriptor_t * lock,int adj_list_remove) flk_wakeup() argument
2168 lock_descriptor_t *vertex, *lock; flk_recompute_dependencies() local
2262 lock_descriptor_t *ver, *lock; flk_color_reachables() local
2295 flk_update_barriers(lock_descriptor_t * lock) flk_update_barriers() argument
2339 flk_find_barriers(lock_descriptor_t * lock) flk_find_barriers() argument
2388 lock_descriptor_t *lock, *blocker; flk_get_first_blocking_lock() local
2503 lock_descriptor_t *lock; cl_flk_has_remote_locks_for_nlmid() local
2577 lock_descriptor_t *lock; flk_has_remote_locks() local
2625 lock_descriptor_t *lock; flk_has_remote_locks_for_sysid() local
2682 lock_descriptor_t *lock; flk_sysid_has_locks() local
2735 lock_descriptor_t *lock, *nlock; cl_flk_remove_locks_by_sysid() local
2788 lock_descriptor_t *lock, *nlock; flk_delete_locks_by_sysid() local
2842 lock_descriptor_t *lock, *nlock; cl_flk_delete_pxfs_locks() local
2901 lock_descriptor_t *lock, *nlock; flk_canceled() local
2934 lock_descriptor_t *lock, *nlock; cleanlocks() local
3079 flk_check_deadlock(lock_descriptor_t * lock) flk_check_deadlock() argument
3255 flk_get_proc_vertex(lock_descriptor_t * lock) flk_get_proc_vertex() argument
3663 lock_descriptor_t *lock; get_lock_list() local
3847 lock_descriptor_t *lock; /* lock */ cl_flk_change_nlm_state_all_locks() local
3908 lock_descriptor_t *lock; cl_flk_wakeup_sleeping_nlm_locks() local
3956 lock_descriptor_t *lock; cl_flk_unlock_nlm_granted() local
4006 lock_descriptor_t *lock; wakeup_sleeping_lockmgr_locks() local
4041 lock_descriptor_t *lock; unlock_lockmgr_granted() local
4292 lock_descriptor_t *lock; nbl_lock_conflict() local
4354 lock_descriptor_t *lock, *lock1; check_active_locks() local
4538 lock_descriptor_t *lock; check_owner_locks() local
[all...]
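
flk_set_state() together with the "Illegal lock transition" panic in flock.c suggests that every lock state change is validated against a table of legal transitions. The sketch below shows the general idea with hypothetical states and an invented transition table; the real illumos state machine differs.

    #include <assert.h>
    #include <stdio.h>

    /* Hypothetical states; illumos uses its own FLK_*_STATE values. */
    enum lk_state {
        LK_INITIAL, LK_ACTIVE, LK_SLEEPING, LK_GRANTED, LK_DEAD, LK_NSTATES
    };

    /* legal[from][to] != 0 means the transition is allowed (illustrative only). */
    static const unsigned char legal[LK_NSTATES][LK_NSTATES] = {
        [LK_INITIAL]  = { [LK_ACTIVE] = 1, [LK_SLEEPING] = 1, [LK_DEAD] = 1 },
        [LK_SLEEPING] = { [LK_GRANTED] = 1, [LK_DEAD] = 1 },
        [LK_GRANTED]  = { [LK_ACTIVE] = 1, [LK_DEAD] = 1 },
        [LK_ACTIVE]   = { [LK_DEAD] = 1 },
    };

    static void
    lk_set_state(enum lk_state *cur, enum lk_state next)
    {
        /* The kernel version would panic via cmn_err(CE_PANIC, ...) instead. */
        assert(legal[*cur][next] && "illegal lock transition");
        *cur = next;
    }

    int
    main(void)
    {
        enum lk_state s = LK_INITIAL;

        lk_set_state(&s, LK_SLEEPING);
        lk_set_state(&s, LK_GRANTED);
        lk_set_state(&s, LK_ACTIVE);
        (void) printf("final state: %d\n", (int)s);
        return (0);
    }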
/titanic_51/usr/src/cmd/vntsd/
vntsdvcc.c
58 (void) mutex_lock(&clientp->lock); in vntsd_notify_client_cons_del()
61 (void) mutex_unlock(&clientp->lock); in vntsd_notify_client_cons_del()
70 (void) mutex_destroy(&consp->lock); in free_cons()
82 (void) mutex_destroy(&groupp->lock); in free_group()
107 (void) mutex_lock(&consp->lock); in cleanup_cons()
122 (void) cond_reltimedwait(&consp->cvp, &consp->lock, &to); in cleanup_cons()
126 (void) mutex_lock(&groupp->lock); in cleanup_cons()
129 (void) mutex_unlock(&groupp->lock); in cleanup_cons()
131 (void) mutex_unlock(&consp->lock); in cleanup_cons()
167 (void) mutex_lock(&vntsdp->lock); in vntsd_delete_cons()
[all...]
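
cleanup_cons() above waits on a condition variable with a relative timeout via cond_reltimedwait(). A portable equivalent uses pthread_cond_timedwait() with an absolute deadline; a minimal sketch, assuming a simple ready flag as the predicate:

    #include <pthread.h>
    #include <stdio.h>
    #include <time.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static int ready;                   /* predicate protected by 'lock' */

    /* Wait up to 'secs' seconds for 'ready'; 0 on success, ETIMEDOUT otherwise. */
    static int
    wait_ready(int secs)
    {
        struct timespec deadline;
        int rc = 0;

        (void) clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += secs;

        (void) pthread_mutex_lock(&lock);
        while (!ready && rc == 0)
            rc = pthread_cond_timedwait(&cv, &lock, &deadline);
        (void) pthread_mutex_unlock(&lock);
        return (ready ? 0 : rc);
    }

    int
    main(void)
    {
        (void) printf("wait_ready: %s\n",
            wait_ready(1) == 0 ? "ok" : "timed out");
        return (0);
    }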
console.c
113 (void) mutex_lock(&consp->lock); in create_write_thread()
122 (void) mutex_unlock(&consp->lock); in create_write_thread()
126 (void) mutex_unlock(&consp->lock); in create_write_thread()
156 (void) mutex_lock(&groupp->lock); in list_all_domains()
163 (void) mutex_unlock(&groupp->lock); in list_all_domains()
259 (void) mutex_lock(&groupp->lock); in select_cons()
261 (void) mutex_unlock(&groupp->lock); in select_cons()
265 (void) mutex_unlock(&groupp->lock); in select_cons()
318 (void) mutex_lock(&groupp->lock); in select_cons()
325 (void) mutex_unlock(&groupp->lock); in select_cons()
[all...]
/titanic_51/usr/src/contrib/ast/src/lib/libast/aso/
aso-fcntl.c
55 struct flock lock;
61 lock.l_type = F_WRLCK;
62 lock.l_whence = SEEK_SET;
63 lock.l_start = apl->size;
64 lock.l_len = sizeof(references);
65 if (fcntl(apl->fd, F_SETLKW, &lock) >= 0)
79 lock.l_type = F_UNLCK;
80 fcntl(apl->fd, F_SETLK, &lock);
133 lock.l_type = F_WRLCK;
134 lock
[all...]
asolock.c
33 asolock(unsigned int volatile* lock, unsigned int key, int type)
41 return *lock == 0 ? 0 : asocasint(lock, key, 0) == key ? 0 : -1;
43 return *lock == key ? 0 : asocasint(lock, 0, key) == 0 ? 0 : -1;
45 if (*lock == key)
49 for (k = 0; asocasint(lock, 0, key) != 0; ASOLOOP(k));
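
asolock() claims and releases a word-sized lock by compare-and-swapping a caller-supplied key with asocasint(). The same pattern can be sketched with GCC/Clang __sync builtins standing in for asocasint(); this is an illustration, not the libast implementation:

    #include <stdio.h>

    /*
     * Claim *lock for 'key' by atomically swapping 0 -> key, spinning until it
     * succeeds; release by swapping key -> 0.
     */
    static void
    key_lock(volatile unsigned int *lock, unsigned int key)
    {
        while (__sync_val_compare_and_swap(lock, 0, key) != 0)
            ;                           /* spin; real code would back off */
    }

    static int
    key_unlock(volatile unsigned int *lock, unsigned int key)
    {
        /* Only the holder of 'key' may release, mirroring asolock()'s check. */
        return (__sync_val_compare_and_swap(lock, key, 0) == key ? 0 : -1);
    }

    int
    main(void)
    {
        volatile unsigned int lk = 0;

        key_lock(&lk, 42);
        (void) printf("unlock: %d\n", key_unlock(&lk, 42));         /* 0 */
        (void) printf("double unlock: %d\n", key_unlock(&lk, 42));  /* -1 */
        return (0);
    }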
/titanic_51/usr/src/lib/libcrypt/common/
cryptio.c
60 static mutex_t lock = DEFAULTMUTEX; variable
70 (void) mutex_lock(&lock); in run_setkey()
72 (void) mutex_unlock(&lock); in run_setkey()
78 (void) mutex_unlock(&lock); in run_setkey()
82 (void) mutex_unlock(&lock); in run_setkey()
85 (void) mutex_unlock(&lock); in run_setkey()
127 (void) mutex_lock(&lock); in run_crypt()
135 (void) mutex_unlock(&lock); in run_crypt()
141 (void) mutex_unlock(&lock); in run_crypt()
147 (void) mutex_unlock(&lock); in run_crypt()
[all...]
/titanic_51/usr/src/lib/libnsl/common/
daemon_utils.c
46 * Use an advisory lock to ensure that only one daemon process is
47 * active in the system at any point in time. If the lock is held
49 * the lock to the caller immediately. The lock is cleared if the
50 * holding daemon process exits for any reason even if the lock
55 * check if another process is holding lock on the lock file.
65 struct flock lock; in _check_daemon_lock() local
73 lock.l_type = F_WRLCK; in _check_daemon_lock()
74 lock in _check_daemon_lock()
144 struct flock lock; _enter_daemon_lock() local
[all...]
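
The daemon_utils.c comment describes the classic single-instance pattern: hold an exclusive advisory record lock on a lock file for the daemon's lifetime, and let the kernel release it automatically when the process exits. A minimal userland sketch of that idea follows; the lock-file path and function name are made up, and _enter_daemon_lock() itself is different.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Returns the held fd on success, -1 if another instance holds the lock. */
    static int
    enter_daemon_lock(const char *path)
    {
        struct flock lock;
        int fd;

        if ((fd = open(path, O_RDWR | O_CREAT, 0644)) < 0)
            return (-1);

        (void) memset(&lock, 0, sizeof (lock));
        lock.l_type = F_WRLCK;          /* exclusive lock */
        lock.l_whence = SEEK_SET;
        lock.l_start = 0;
        lock.l_len = 0;                 /* length 0 covers the whole file */

        if (fcntl(fd, F_SETLK, &lock) < 0) {    /* non-blocking attempt */
            (void) close(fd);
            return (-1);                /* another daemon already holds it */
        }
        return (fd);                    /* keep fd open for the daemon's lifetime */
    }

    int
    main(void)
    {
        int fd = enter_daemon_lock("/tmp/mydaemon.lock");   /* hypothetical path */

        (void) printf(fd >= 0 ? "lock acquired\n" : "already running\n");
        return (0);
    }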
/titanic_51/usr/src/lib/libsqlite/test/
lock.test
17 # $Id: lock.test,v 1.20 2004/02/14 16:31:04 drh Exp $
25 do_test lock-1.0 {
29 do_test lock-1.1 {
32 do_test lock-1.2 {
35 do_test lock-1.3 {
39 #do_test lock-1.4 {
44 do_test lock-1.5 {
50 do_test lock-1.6 {
54 do_test lock-1.7 {
57 do_test lock
[all...]
/titanic_51/usr/src/lib/krb5/plugins/kdb/db2/
adb_openclose.c
71 /* only create the lock file if we successfully created the db */ in osa_adb_create_db()
124 * Do not release the lock on fromdb because it is being renamed in osa_adb_rename_db()
170 * distinct lockinfo structures, things get confused: lock(A), in osa_adb_init_db()
171 * lock(B), release(B) will result in the kernel unlocking the in osa_adb_init_db()
172 * lock file but handle A will still think the file is locked. in osa_adb_init_db()
173 * Therefore, all handles using the same lock file must share a in osa_adb_init_db()
179 * lock files. This code used to use a single static lockinfo in osa_adb_init_db()
181 * the first database's lock file. This was Bad. in osa_adb_init_db()
241 db->lock = &lockp->lockinfo; in osa_adb_init_db()
242 db->lock in osa_adb_init_db()
[all...]
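
The adb_openclose.c comment explains why all handles on the same lock file must share one lockinfo structure: POSIX record locks are owned by the process, so a second, independent lock and release through the same file silently drops the first handle's lock. A sketch of the sharing it argues for, a refcounted record keyed by the lock file's (dev, ino), with hypothetical names:

    #include <sys/stat.h>
    #include <fcntl.h>
    #include <stdlib.h>
    #include <unistd.h>

    /* One shared lock record per underlying lock file (hypothetical layout). */
    typedef struct lockinfo {
        dev_t dev;
        ino_t ino;
        int fd;                         /* the one fd all handles lock through */
        int refcnt;
        struct lockinfo *next;
    } lockinfo_t;

    static lockinfo_t *lockinfo_list;

    /*
     * Look up (or create) the shared lockinfo for 'path'.  Handles that name
     * the same file get the same record, so lock and release calls go through
     * one place instead of silently clobbering each other's kernel locks.
     */
    lockinfo_t *
    lockinfo_get(const char *path)
    {
        struct stat st;
        lockinfo_t *lp;
        int fd;

        if ((fd = open(path, O_RDWR | O_CREAT, 0600)) < 0)
            return (NULL);
        if (fstat(fd, &st) < 0) {
            (void) close(fd);
            return (NULL);
        }
        for (lp = lockinfo_list; lp != NULL; lp = lp->next) {
            if (lp->dev == st.st_dev && lp->ino == st.st_ino) {
                (void) close(fd);       /* reuse the existing shared fd */
                lp->refcnt++;
                return (lp);
            }
        }
        if ((lp = calloc(1, sizeof (*lp))) == NULL) {
            (void) close(fd);
            return (NULL);
        }
        lp->dev = st.st_dev;
        lp->ino = st.st_ino;
        lp->fd = fd;
        lp->refcnt = 1;
        lp->next = lockinfo_list;
        lockinfo_list = lp;
        return (lp);
    }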
/titanic_51/usr/src/lib/gss_mechs/mech_krb5/krb5/rcache/
rc_mem.c
112 err = k5_mutex_lock(&id->lock); in krb5_rc_mem_get_span()
116 if (err = k5_mutex_lock(&grcache.lock)) { in krb5_rc_mem_get_span()
117 k5_mutex_unlock(&id->lock); in krb5_rc_mem_get_span()
122 k5_mutex_unlock(&grcache.lock); in krb5_rc_mem_get_span()
124 k5_mutex_unlock(&id->lock); in krb5_rc_mem_get_span()
144 retval = k5_mutex_lock(&id->lock); in krb5_rc_mem_init()
147 retval = k5_mutex_lock(&grcache.lock); in krb5_rc_mem_init()
149 k5_mutex_unlock(&id->lock); in krb5_rc_mem_init()
155 k5_mutex_unlock(&grcache.lock); in krb5_rc_mem_init()
156 k5_mutex_unlock(&id->lock); in krb5_rc_mem_init()
[all...]
/titanic_51/usr/src/common/atomic/amd64/
atomic.s
46 lock
54 lock
62 lock
70 lock
80 lock
91 lock
102 lock
113 lock
122 lock
130 lock
[all...]
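
The amd64 atomic.s matches are the x86 lock instruction prefix, which makes the following read-modify-write instruction atomic with respect to other CPUs. A C sketch of the idea using GCC/Clang inline assembly (x86-only, and not the atomic.s source):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Atomically increment *p.  The "lock" prefix makes the incl a single
     * atomic read-modify-write across CPUs, so concurrent increments are
     * not lost.
     */
    static inline void
    atomic_inc_32(volatile uint32_t *p)
    {
        __asm__ __volatile__("lock; incl %0" : "+m" (*p));
    }

    int
    main(void)
    {
        volatile uint32_t counter = 0;

        atomic_inc_32(&counter);
        atomic_inc_32(&counter);
        (void) printf("%u\n", counter);     /* prints 2 */
        return (0);
    }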
/titanic_51/usr/src/uts/common/io/hxge/
hxge_common_impl.h
119 #define MUTEX_INIT(lock, name, type, arg) \ argument
120 mutex_init(lock, name, type, arg)
121 #define MUTEX_ENTER(lock) mutex_enter(lock) argument
122 #define MUTEX_TRY_ENTER(lock) mutex_tryenter(lock) argument
123 #define MUTEX_EXIT(lock) mutex_exit(lock) argument
124 #define MUTEX_DESTROY(lock) mutex_destroy(lock) argument
126 RW_INIT(lock,name,type,arg) global() argument
127 RW_ENTER_WRITER(lock) global() argument
128 RW_ENTER_READER(lock) global() argument
129 RW_TRY_ENTER(lock,type) global() argument
130 RW_EXIT(lock) global() argument
131 RW_DESTROY(lock) global() argument
[all...]
/titanic_51/usr/src/uts/common/io/fibre-channel/fca/emlxs/
emlxs_thread.c
46 mutex_enter(&tthread->lock); in emlxs_taskq_thread()
57 cv_wait(&tthread->cv_flag, &tthread->lock); in emlxs_taskq_thread()
65 mutex_exit(&tthread->lock); in emlxs_taskq_thread()
69 mutex_enter(&tthread->lock); in emlxs_taskq_thread()
75 mutex_exit(&tthread->lock); in emlxs_taskq_thread()
127 mutex_enter(&tthread->lock); in emlxs_taskq_dispatch()
131 mutex_exit(&tthread->lock); in emlxs_taskq_dispatch()
171 mutex_init(&tthread->lock, NULL, MUTEX_DRIVER, in emlxs_taskq_create()
222 * If the thread lock can be acquired, in emlxs_taskq_destroy()
229 mutex_enter(&tthread->lock); in emlxs_taskq_destroy()
[all...]
/titanic_51/usr/src/uts/common/fs/smbclnt/smbfs/
smbfs_rwlock.c
32 * A homegrown reader/writer lock implementation. It addresses
62 mutex_enter(&l->lock); in smbfs_rw_enter_sig()
69 /* lock is held for writing by current thread */ in smbfs_rw_enter_sig()
85 if (!cv_wait_sig(&l->cv, &l->lock)) { in smbfs_rw_enter_sig()
88 mutex_exit(&l->lock); in smbfs_rw_enter_sig()
94 cv_wait(&l->cv, &l->lock); in smbfs_rw_enter_sig()
120 if (!cv_wait_sig(&l->cv, &l->lock)) { in smbfs_rw_enter_sig()
125 mutex_exit(&l->lock); in smbfs_rw_enter_sig()
131 cv_wait(&l->cv, &l->lock); in smbfs_rw_enter_sig()
138 mutex_exit(&l->lock); in smbfs_rw_enter_sig()
[all...]
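
smbfs_rwlock.c builds a reader/writer lock out of a mutex and a condition variable, with signal-interruptible waits. A minimal pthreads sketch of the same structure, omitting interruptibility and writer-preference details:

    #include <pthread.h>

    /*
     * Minimal reader/writer lock: count > 0 means that many readers hold it,
     * count == -1 means a single writer holds it.  Initialize a static
     * instance with { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0 }.
     */
    typedef struct {
        pthread_mutex_t lock;
        pthread_cond_t cv;
        int count;
    } hg_rwlock_t;

    void
    rw_enter_reader(hg_rwlock_t *l)
    {
        (void) pthread_mutex_lock(&l->lock);
        while (l->count < 0)            /* wait out any writer */
            (void) pthread_cond_wait(&l->cv, &l->lock);
        l->count++;
        (void) pthread_mutex_unlock(&l->lock);
    }

    void
    rw_enter_writer(hg_rwlock_t *l)
    {
        (void) pthread_mutex_lock(&l->lock);
        while (l->count != 0)           /* wait for all readers and writers */
            (void) pthread_cond_wait(&l->cv, &l->lock);
        l->count = -1;
        (void) pthread_mutex_unlock(&l->lock);
    }

    void
    rw_exit(hg_rwlock_t *l)
    {
        (void) pthread_mutex_lock(&l->lock);
        if (l->count == -1)
            l->count = 0;
        else
            l->count--;
        (void) pthread_cond_broadcast(&l->cv);
        (void) pthread_mutex_unlock(&l->lock);
    }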
/titanic_51/usr/src/lib/libslp/clib/
slp_queue.c
63 mutex_t *lock; member
77 mutex_t *lock; in slp_new_queue() local
84 if ((lock = calloc(1, sizeof (*lock))) == NULL) { in slp_new_queue()
106 q->lock = lock; in slp_new_queue()
127 (void) mutex_lock(q->lock); in slp_enqueue()
137 (void) mutex_unlock(q->lock); in slp_enqueue()
156 (void) mutex_lock(q->lock); in slp_enqueue_at_head()
162 (void) mutex_unlock(q->lock); in slp_enqueue_at_head()
[all...]
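
slp_queue.c serializes queue operations with a per-queue mutex allocated next to the queue. A pared-down sketch of the enqueue path under that lock; the names are illustrative, not the libslp API:

    #include <pthread.h>
    #include <stdlib.h>

    typedef struct q_node {
        void *data;
        struct q_node *next;
    } q_node_t;

    typedef struct {
        pthread_mutex_t lock;           /* protects head and tail */
        pthread_cond_t wait;            /* signaled when an item arrives */
        q_node_t *head;
        q_node_t *tail;
    } slpq_t;

    /* Append an item under the queue lock and wake one waiting consumer. */
    int
    queue_enqueue(slpq_t *q, void *data)
    {
        q_node_t *n;

        if ((n = calloc(1, sizeof (*n))) == NULL)
            return (-1);
        n->data = data;

        (void) pthread_mutex_lock(&q->lock);
        if (q->tail == NULL) {
            q->head = q->tail = n;
        } else {
            q->tail->next = n;
            q->tail = n;
        }
        (void) pthread_cond_signal(&q->wait);
        (void) pthread_mutex_unlock(&q->lock);
        return (0);
    }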
/titanic_51/usr/src/contrib/ast/src/lib/libast/astsa/
aso.c
27 asolock(unsigned int volatile* lock, unsigned int key, int type) in asolock() argument
35 if (*lock != 0) in asolock()
37 if (*lock != key) in asolock()
39 *lock = 0; in asolock()
43 if (*lock != key) in asolock()
45 if (*lock != 0) in asolock()
47 *lock = key; in asolock()
52 *lock = key; in asolock()
/titanic_51/usr/src/uts/sun4v/io/
vcc.c
352 ASSERT(mutex_owned(&vport->lock)); in i_vcc_wait_port_status()
386 rv = cv_wait_sig(cv, &vport->lock); in i_vcc_wait_port_status()
402 mutex_enter(&vport->lock); in i_vcc_set_port_status()
405 mutex_exit(&vport->lock); in i_vcc_set_port_status()
415 ASSERT(mutex_owned(&vport->lock)); in i_vcc_ldc_init()
477 ASSERT(mutex_owned(&vport->lock)); in i_vcc_ldc_fini()
497 mutex_exit(&vport->lock); in i_vcc_ldc_fini()
500 mutex_enter(&vport->lock); in i_vcc_ldc_fini()
569 /* make sure holding read lock */ in i_vcc_read_ldc()
634 * do not need to hold lock becaus in vcc_ldc_cb()
[all...]
ldc_shm.c
204 mutex_enter(&ldcp->lock); in ldc_mem_alloc_handle()
211 mutex_exit(&ldcp->lock); in ldc_mem_alloc_handle()
218 /* initialize the lock */ in ldc_mem_alloc_handle()
219 mutex_init(&mhdl->lock, NULL, MUTEX_DRIVER, NULL); in ldc_mem_alloc_handle()
239 mutex_exit(&ldcp->lock); in ldc_mem_alloc_handle()
263 mutex_enter(&mhdl->lock); in ldc_mem_free_handle()
271 mutex_exit(&mhdl->lock); in ldc_mem_free_handle()
274 mutex_exit(&mhdl->lock); in ldc_mem_free_handle()
283 mutex_destroy(&mhdl->lock); in ldc_mem_free_handle()
294 mutex_destroy(&mhdl->lock); in ldc_mem_free_handle()
[all...]
/titanic_51/usr/src/lib/udapl/udapl_tavor/common/
dapl_ia_util.c
91 dapl_os_lock_init(&ia_ptr->header.lock); in dapl_ia_alloc()
458 * handle. Manipulate under lock to prevent races with threads trying to
465 dapl_os_lock(&hca_ptr->lock); in dapli_ia_release_hca()
484 dapl_os_unlock(&hca_ptr->lock); in dapli_ia_release_hca()
523 dapl_os_lock_destroy(&ia_ptr->header.lock); in dapls_ia_free()
549 dapl_os_lock(&ia_ptr->header.lock); in dapl_ia_link_ep()
553 dapl_os_unlock(&ia_ptr->header.lock); in dapl_ia_link_ep()
577 dapl_os_lock(&ia_ptr->header.lock); in dapl_ia_unlink_ep()
580 dapl_os_unlock(&ia_ptr->header.lock); in dapl_ia_unlink_ep()
604 dapl_os_lock(&ia_ptr->header.lock); in dapl_ia_link_lmr()
[all...]
/titanic_51/usr/src/uts/sun4u/excalibur/io/
xcalwd.c
56 kmutex_t lock; member
263 mutex_init(&tsp->lock, NULL, MUTEX_DRIVER, NULL); in xcalwd_attach()
290 mutex_destroy(&tsp->lock); in xcalwd_detach()
318 mutex_enter(&tsp->lock); in xcalwd_timeout()
321 mutex_exit(&tsp->lock); in xcalwd_timeout()
324 mutex_exit(&tsp->lock); in xcalwd_timeout()
367 mutex_enter(&tsp->lock); in xcalwd_close()
370 mutex_exit(&tsp->lock); in xcalwd_close()
380 mutex_exit(&tsp->lock); in xcalwd_close()
425 mutex_enter(&tsp->lock); in xcalwd_ioctl()
[all...]
/titanic_51/usr/src/uts/common/vm/
seg_vn.h
69 * The read/write segment lock protects all of segvn_data including the
72 * The "write" version of the segment lock, however, is required in order to
81 * is written by acquiring either the readers lock on the segment and
82 * freemem lock, or any lock combination which guarantees exclusive use
83 * of this segment (e.g., address space writers lock,
84 * address space readers lock + segment writers lock).
87 krwlock_t lock; /* protect segvn_data and vpage array */ member
88 kmutex_t segfree_syncmtx; /* barrier lock fo
136 SEGVN_LOCK_ENTER(as,lock,type) global() argument
137 SEGVN_LOCK_EXIT(as,lock) global() argument
138 SEGVN_LOCK_DOWNGRADE(as,lock) global() argument
139 SEGVN_LOCK_TRYENTER(as,lock,type) global() argument
144 SEGVN_LOCK_HELD(as,lock) global() argument
145 SEGVN_READ_HELD(as,lock) global() argument
146 SEGVN_WRITE_HELD(as,lock) global() argument
[all...]
/titanic_51/usr/src/uts/common/syscall/
sem.c
80 * ID lock). The former is used by semop, where a lookup is performed
92 * Avoiding a lock ordering violation between p_lock and the ID lock,
97 * sem_rmid, holding the ID lock, iterates through all undo structures
110 * takes the appropriate semaphore's ID lock (always legal since the
114 * semaphore, drops the ID lock, and frees the undo structure.
355 kmutex_t *lock; in semctl() local
378 if ((lock = ipc_lookup(sem_svc, semid, in semctl()
382 mutex_exit(lock); in semctl()
399 if ((lock in semctl()
668 kmutex_t *lock; semget() local
806 sem_undo_alloc(proc_t * pp,ksemid_t * sp,kmutex_t ** lock,struct sem_undo * template,struct sem_undo ** un) sem_undo_alloc() argument
866 kmutex_t *lock; semop() local
[all...]
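
The sem.c comments are about avoiding a lock-ordering violation between p_lock and the semaphore ID lock. When code already holds one lock but the documented order requires the other first, the usual defensive pattern is trylock, and on contention back out and retake both in the legal order. A generic sketch, not the sem.c code:

    #include <pthread.h>

    /*
     * The caller already holds 'a', but the documented lock order is b before a.
     * Try to take 'b' opportunistically; on contention, back out and retake
     * both in the legal order.  On return both locks are held.
     */
    void
    take_both(pthread_mutex_t *a, pthread_mutex_t *b)
    {
        if (pthread_mutex_trylock(b) == 0)
            return;                     /* got b without blocking; no deadlock risk */
        (void) pthread_mutex_unlock(a); /* back out to respect the ordering */
        (void) pthread_mutex_lock(b);   /* legal order: b first ... */
        (void) pthread_mutex_lock(a);   /* ... then a */
    }

Because 'a' is dropped and reacquired on the contended path, a real implementation re-validates any state it had read under 'a' before continuing.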
/titanic_51/usr/src/lib/libdevinfo/
devinfo_dli.c
94 flock_t lock; in di_dli_open() local
118 bzero(&lock, sizeof (lock)); in di_dli_open()
119 lock.l_type = l_type; in di_dli_open()
120 if (fcntl(fd, F_SETLKW, &lock) < 0) { in di_dli_open()
150 flock_t lock; in di_dli_close() local
154 bzero(&lock, sizeof (lock)); in di_dli_close()
155 lock.l_type = F_UNLCK; in di_dli_close()
156 (void) fcntl(fd, F_SETLK, &lock); in di_dli_close()
[all...]
