/freebsd/sys/dev/drm2/ttm/ |
H A D | ttm_lock.c |
      3  * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
     20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     28  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
     48  void ttm_lock_init(struct ttm_lock *lock)
     50          mtx_init(&lock->lock, "ttmlk", NULL, MTX_DEF);
     51          lock->rw = 0;
     52          lock->flags = 0;
     53          lock->kill_takers = false;
     54          lock->signal = SIGKILL;
     68  void ttm_read_unlock(struct ttm_lock *lock)
         [all …]
|
H A D | ttm_lock.h |
      3  * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
     20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     28  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
     33  * of the DRM heavyweight hardware lock.
     34  * The lock is a read-write lock. Taking it in read mode and write mode
     35  * is relatively fast, and intended for in-kernel use only.
     38  * user-space processes from validating buffers.
     39  * It's allowed to leave kernel space with the vt lock held.
     40  * If a user-space process dies while having the vt-lock,
     41  * it will be released during the file descriptor release. The vt lock
         [all …]
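The header comment describes a read/write lock whose write ("vt") side fences off user-space buffer validation, e.g. across suspend. A rough sketch of how the read side brackets a validation path; the int-returning, interruptible ttm_read_lock() signature and the surrounding driver code are assumptions, only ttm_read_unlock() appears in the listing above:

    #include <dev/drm2/ttm/ttm_lock.h>      /* declarations assumed from this header */

    /* Hypothetical driver path that validates a buffer object. */
    static int
    driver_validate_buffer(struct ttm_lock *lock, bool interruptible)
    {
            int ret;

            /* Blocks (or fails) while the vt/suspend write side is held. */
            ret = ttm_read_lock(lock, interruptible);
            if (ret != 0)
                    return (ret);

            /* ... validate/populate the buffer object here ... */

            ttm_read_unlock(lock);
            return (0);
    }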
|
/freebsd/sys/dev/drm2/ |
H A D | drm_lock.c |
     46  * Lock ioctl.
     54  * Add the current task to the lock wait queue, and attempt to take to lock.
     58          struct drm_lock *lock = data;
     59          struct drm_master *master = file_priv->master;
     62          ++file_priv->lock_count;
     64          if (lock->context == DRM_KERNEL_CONTEXT) {
     66                  DRM_CURRENTPID, lock->context);
     67                  return -EINVAL;
     70          DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
     71                   lock->context, DRM_CURRENTPID,
         [all …]
|
/freebsd/contrib/ntp/sntp/libevent/ |
H A D | evthread.c |
      2  * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
     27  #include "event2/event-config.h"
     28  #include "evconfig-private.h"
     37  #include "log-internal.h"
     38  #include "mm-internal.h"
     39  #include "util-internal.h"
     40  #include "evthread-internal.h"
    106          if (target->alloc)
    107                  event_warnx("Trying to disable lock functions after "
    112          if (target->alloc) {
         [all …]
|
/freebsd/contrib/libevent/ |
H A D | evthread.c |
      2  * Copyright (c) 2008-2012 Niels Provos, Nick Mathewson
     27  #include "event2/event-config.h"
     28  #include "evconfig-private.h"
     37  #include "log-internal.h"
     38  #include "mm-internal.h"
     39  #include "util-internal.h"
     40  #include "evthread-internal.h"
    106          if (target->alloc)
    107                  event_warnx("Trying to disable lock functions after "
    112          if (target->alloc) {
         [all …]
|
/freebsd/share/man/man9/ |
H A D | lock.9 |
     53  .Fn lockinit "struct lock *lkp" "int prio" "const char *wmesg" "int timo" "int flags"
     55  .Fn lockdestroy "struct lock *lkp"
     57  .Fn lockmgr "struct lock *lkp" "u_int flags" "struct mtx *ilk"
     59  .Fn lockmgr_args "struct lock *lkp" "u_int flags" "struct mtx *ilk" "const char *wmesg" "int prio" …
     61  .Fn lockmgr_args_rw "struct lock *lkp" "u_int flags" "struct rwlock *ilk" "const char *wmesg" "int …
     63  .Fn lockmgr_disown "struct lock *lkp"
     65  .Fn lockmgr_disowned "const struct lock *lkp"
     67  .Fn lockmgr_lock_flags "struct lock *lkp" "u_int flags" "struct lock_object *ilk" "const char *file…
     69  .Fn lockmgr_printinfo "const struct lock *lkp"
     71  .Fn lockmgr_recursed "const struct lock *lkp"
         [all …]
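The lockmgr(9) functions listed above form FreeBSD's classic sleepable lock-manager API. A minimal usage sketch under the synopsis shown; the softc structure, field names and priority/wmesg values are illustrative, not taken from the manual page:

    #include <sys/param.h>
    #include <sys/priority.h>
    #include <sys/lock.h>
    #include <sys/lockmgr.h>

    struct foo_softc {
            struct lock     sc_lock;        /* hypothetical per-instance lock */
            int             sc_state;
    };

    static void
    foo_init(struct foo_softc *sc)
    {
            /* Name shows up in witness/debug output; no timeout, no flags. */
            lockinit(&sc->sc_lock, PRIBIO, "foolk", 0, 0);
    }

    static void
    foo_update(struct foo_softc *sc, int state)
    {
            lockmgr(&sc->sc_lock, LK_EXCLUSIVE, NULL);      /* write access */
            sc->sc_state = state;
            lockmgr(&sc->sc_lock, LK_RELEASE, NULL);
    }

    static int
    foo_read(struct foo_softc *sc)
    {
            int state;

            lockmgr(&sc->sc_lock, LK_SHARED, NULL);         /* read access */
            state = sc->sc_state;
            lockmgr(&sc->sc_lock, LK_RELEASE, NULL);
            return (state);
    }

    static void
    foo_fini(struct foo_softc *sc)
    {
            lockdestroy(&sc->sc_lock);
    }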
|
H A D | rmlock.9 |
     51  .Nd kernel reader/writer lock optimized for read-mostly access patterns
     97  Read-mostly locks allow shared access to protected data by multiple threads,
    106  Read-mostly locks are designed to be efficient for locks almost exclusively
    109  Acquiring an exclusive lock after the lock has been locked for shared access
    112  Normal read-mostly locks are similar to
    114  locks and follow the same lock ordering rules as
    117  Read-mostly locks have full priority propagation like mutexes.
    120  read-mostly locks propagate priority to both readers and writers.
    127  Readers can recurse if the lock is initialized with the
    136  It changes lock ordering rules to the same as for
         [all …]
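rmlock(9) readers pass a per-acquisition priority tracker so the cheap read path can still support priority propagation. A minimal sketch assuming a hypothetical read-mostly global protected by the standard rm_init()/rm_rlock()/rm_wlock() calls:

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/rmlock.h>

    static struct rmlock    cfg_lock;       /* hypothetical read-mostly lock */
    static int              cfg_value;

    static void
    cfg_init(void)
    {
            rm_init(&cfg_lock, "cfglock");
    }

    static int
    cfg_get(void)
    {
            struct rm_priotracker tracker;  /* per-reader, lives on the stack */
            int v;

            rm_rlock(&cfg_lock, &tracker);
            v = cfg_value;
            rm_runlock(&cfg_lock, &tracker);
            return (v);
    }

    static void
    cfg_set(int v)
    {
            rm_wlock(&cfg_lock);            /* writers take the expensive path */
            cfg_value = v;
            rm_wunlock(&cfg_lock);
    }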
|
H A D | sx.9 |
     52  .Nd kernel shared/exclusive lock
    116  is a pointer to a null-terminated character string that describes the
    117  shared/exclusive lock.
    125  .Bl -tag -width SX_NOWITNESS
    131  to ignore this lock.
    133  Do not profile this lock.
    138  Do not log any operations for this lock via
    153  The lock
    157  Threads acquire and release a shared lock by calling
    166  Threads acquire and release an exclusive lock by calling
         [all …]
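sx(9) locks are sleepable shared/exclusive locks. A minimal sketch with a hypothetical registry counter, using the standard sx_init()/sx_slock()/sx_xlock() calls:

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/sx.h>

    static struct sx        reg_lock;       /* hypothetical registry lock */
    static int              reg_count;

    static void
    reg_init(void)
    {
            sx_init(&reg_lock, "reglock");
    }

    static int
    reg_lookup(void)
    {
            int n;

            sx_slock(&reg_lock);            /* shared: many readers, may sleep */
            n = reg_count;
            sx_sunlock(&reg_lock);
            return (n);
    }

    static void
    reg_insert(void)
    {
            sx_xlock(&reg_lock);            /* exclusive: single writer */
            reg_count++;
            sx_xunlock(&reg_lock);
    }

    static void
    reg_fini(void)
    {
            sx_destroy(&reg_lock);
    }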
|
H A D | rwlock.9 |
     48  .Nd kernel reader/writer lock
    111  can be locked while holding a non-spin mutex, and an
    123  .Bl -tag -width indent
    127  as reader/writer lock, described by name
    131  on the lock.
    133  Initialize the rw lock just like the
    142  .Bl -tag -width ".Dv RW_NOPROFILE"
    146  Do not profile this lock.
    150  to ignore this lock.
    152  Do not log any operations for this lock via
         [all …]
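rwlock(9) is the non-sleepable kernel reader/writer lock. A minimal sketch with a hypothetical table counter:

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/rwlock.h>

    static struct rwlock    tbl_lock;       /* hypothetical table lock */
    static int              tbl_entries;

    static void
    tbl_init(void)
    {
            rw_init(&tbl_lock, "tbllock");
    }

    static int
    tbl_count(void)
    {
            int n;

            rw_rlock(&tbl_lock);            /* holders must not sleep */
            n = tbl_entries;
            rw_runlock(&tbl_lock);
            return (n);
    }

    static void
    tbl_add(void)
    {
            rw_wlock(&tbl_lock);
            tbl_entries++;
            rw_wunlock(&tbl_lock);
    }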
|
/freebsd/sys/kern/ |
H A D | kern_lockf.c |
      1  /*-
      2   * SPDX-License-Identifier: BSD-3-Clause
     29  /*-
     70  #include <sys/lock.h>
     92  static MALLOC_DEFINE(M_LOCKF, "lockf", "Byte-range locking structures");
    172  * This structure is used to keep track of both local and remote lock
    174  * the lock owner structure. Each possible lock owner (local proc for
    179  * If a lock owner has a lock that blocks some other lock or a lock
    180  * that is waiting for some other lock, it also has a vertex in the
    184  * (s) locked by state->ls_lock
         [all …]
|
H A D | kern_rangelock.c |
      1  /*-
      2   * SPDX-License-Identifier: BSD-2-Clause
     36  #include <sys/lock.h>
     62  * written single-threaded, and then read by many processes.
     64  * Lock is in cheat mode when RL_CHEAT_CHEATING bit is set in the
     65  * lock->head. Special cookies are returned in this mode, and
     86  rangelock_cheat_drain(struct rangelock *lock)
     92          v = atomic_load_ptr(&lock->head);
     95          sleepq_add(&lock->head, NULL, "ranged1", 0, 0);
     96          sleepq_wait(&lock->head, PRI_USER);
         [all …]
|
H A D | subr_witness.c |
      1  /*-
      2   * SPDX-License-Identifier: BSD-3-Clause
     38  * Implementation of the `witness' lock verifier. Originally implemented for
     39  * mutexes in BSD/OS. Extended to handle generic lock objects and lock
     45  * Pronunciation: 'wit-n&s
     59  * life -- Pilot>
     64  * Special rules concerning Giant and lock orders:
     69  * 2) Giant must be released when blocking on a sleepable lock.
     73  * Giant. When a thread blocks on a sleepable lock, it sleeps. Hence rule
     79  * a sleepable lock because it is a non-sleepable lock and non-sleepable
         [all …]
|
H A D | kern_condvar.c |
      1  /*-
      2   * SPDX-License-Identifier: BSD-2-Clause
     35  #include <sys/lock.h>
     58          if ((cvp)->cv_waiters < CV_WAITERS_BOUND)       \
     59                  (cvp)->cv_waiters++;                    \
     65  #define CV_ASSERT(cvp, lock, td) do {                   \
     69          KASSERT((lock) !
    108  _cv_wait(struct cv *cvp, struct lock_object *lock)
    171  _cv_wait_unlock(struct cv *cvp, struct lock_object *lock)
    228  _cv_wait_sig(struct cv *cvp, struct lock_object *lock)
    296  _cv_timedwait_sbt(struct cv *cvp, struct lock_object *lock, sbintime_t sbt, sbintime_t pr, int flags)
    366  _cv_timedwait_sig_sbt(struct cv *cvp, struct lock_object *lock, sbintime_t sbt, sbintime_t pr, int flags)
         [all …]
|
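The _cv_* functions above implement the cv_wait(9) family. A minimal sketch of the usual pattern of waiting on a condition variable under a mutex; the counter standing in for a queue and all names are invented:

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>
    #include <sys/condvar.h>

    static struct mtx       q_mtx;          /* hypothetical queue lock */
    static struct cv        q_cv;
    static int              q_len;

    static void
    q_init(void)
    {
            mtx_init(&q_mtx, "qmtx", NULL, MTX_DEF);
            cv_init(&q_cv, "qwait");
    }

    static void
    q_put(void)
    {
            mtx_lock(&q_mtx);
            q_len++;
            cv_signal(&q_cv);               /* wake one waiter */
            mtx_unlock(&q_mtx);
    }

    static void
    q_take(void)
    {
            mtx_lock(&q_mtx);
            while (q_len == 0)              /* always re-check after wakeup */
                    cv_wait(&q_cv, &q_mtx); /* drops and re-acquires q_mtx */
            q_len--;
            mtx_unlock(&q_mtx);
    }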
H A D | subr_turnstile.c |
      1  /*-
      2   * SPDX-License-Identifier: BSD-3-Clause
     36  * non-sleepable locks. Sleepable locks use condition variables to
     38  * turnstile queue's are assigned to a lock held by an owning thread. Thus,
     43  * want to use back-pointers in the locks for the same reason. Thus, we
     46  * in a hash table based on the address of the lock. Each entry in the
     47  * hash table is a linked-lists of turnstiles and is called a turnstile
     52  * and attached to that thread. When a thread blocks on a lock, if it is the
     53  * first thread to block, it lends its turnstile to the lock. If the lock
     54  * already has a turnstile, then it gives its turnstile to the lock's
         [all …]
|
/freebsd/crypto/heimdal/lib/hx509/ |
H A D | lock.c |
      2  * Copyright (c) 2005 - 2006 Kungliga Tekniska Högskolan
     60  hx509_lock_init(hx509_context context, hx509_lock *lock)
     65          *lock = NULL;
     72              "MEMORY:locks-internal",
     75              &l->certs);
     81          *lock = l;
     87  hx509_lock_add_password(hx509_lock lock, const char *password)
     96          d = realloc(lock->password.val,
     97              (lock->password.len + 1) * sizeof(lock->password.val[0]));
    102          lock->password.val = d;
         [all …]
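An hx509_lock is a container of credentials (passwords, certificates, prompters) used to unlock private keys, not a mutual-exclusion primitive. A minimal caller-side sketch, assuming the usual hx509 context setup and an hx509_lock_free() cleanup call; error handling is abbreviated:

    #include <hx509.h>

    int
    unlock_example(void)
    {
            hx509_context context;
            hx509_lock lock;
            int ret;

            ret = hx509_context_init(&context);
            if (ret)
                    return ret;

            ret = hx509_lock_init(context, &lock);
            if (ret == 0) {
                    /* Register a password that can unlock encrypted keys. */
                    ret = hx509_lock_add_password(lock, "example-password");
                    hx509_lock_free(lock);
            }
            hx509_context_free(&context);
            return ret;
    }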
|
/freebsd/share/man/man4/ |
H A D | dtrace_lockstat.4 |
      1  .\" Copyright (c) 2017 George V. Neville-Neil <gnn@FreeBSD.org>
     32  .Fn lockstat:::adaptive-acquire "struct mtx *"
     33  .Fn lockstat:::adaptive-release "struct mtx *"
     34  .Fn lockstat:::adaptive-spin "struct mtx *" "uint64_t"
     35  .Fn lockstat:::adaptive-block "struct mtx *" "uint64_t"
     36  .Fn lockstat:::spin-acquire "struct mtx *"
     37  .Fn lockstat:::spin-release "struct mtx *"
     38  .Fn lockstat:::spin-spin "struct mtx *" "uint64_t"
     39  .Fn lockstat:::rw-acquire "struct rwlock *" "int"
     40  .Fn lockstat:::rw-release "struct rwlock *" "int"
         [all …]
|
/freebsd/contrib/llvm-project/openmp/runtime/src/ |
H A D | kmp_lock.h |
      2  * kmp_lock.h -- lock header file
      5  //===-------
    139  KMP_TAS_LOCK_INITIALIZER(lock)
    198  KMP_FUTEX_LOCK_INITIALIZER(lock)
    277  KMP_TICKET_LOCK_INITIALIZER(lock)
    515  KMP_BOOTSTRAP_LOCK_INITIALIZER(lock)
    516  KMP_BOOTSTRAP_LOCK_INIT(lock)
    549  KMP_LOCK_INIT(lock)
   1119  kmp_user_lock_p lock;
         [all …]
|
/freebsd/sys/sys/ |
H A D | lock.h |
      1  /*-
      2   * SPDX-License-Identifier: BSD-2-Clause
     44  * Lock classes. Each lock has a class which describes characteristics
     48  * an error to perform any type of context switch while holding a spin lock.
     49  * Also, for an individual lock to be recursable, its class must allow
     50  * recursion and the lock itself must explicitly allow recursion.
     52  * The 'lc_ddb_show' function pointer is used to dump class-specific
     53  * data for the 'show lock' DDB command. The 'lc_lock' and
     55  * to lock and unlock locks while blocking on a sleep queue. The
     63          void    (*lc_assert)(const struct lock_object *lock, int what);
         [all …]
|
/freebsd/contrib/ntp/libntp/lib/isc/ |
H A D | task.c |
      2  * Copyright (C) 2004-2012 Internet Systems Consortium, Inc. ("ISC")
      3  * Copyright (C) 1998-2003 Internet Software Consortium.
    107          isc_mutex_t lock;
    108          /* Locked by task lock. */
    118          /* Locked by task manager lock. */
    127  #define TASK_SHUTTINGDOWN(t) (((t)->flags & TASK_F_SHUTTINGDOWN) \
    139          isc_mutex_t lock;
    144          /* Locked by task manager lock. */
    166  #define FINISHED(m) ((m)->exiting && EMPTY((m)->tasks))
    298          isc__taskmgr_t *manager = task->manager;
         [all …]
|
/freebsd/contrib/unbound/util/storage/ |
H A D | lruhash.c |
      2  * util/storage/lruhash.c - hashtable, hash function, LRU keeping.
     55          lock_quick_init(&array[i].lock);
     56          lock_protect(&array[i].lock, &array[i],
     71          lock_quick_init(&table->lock);
     72          table->sizefunc = sizefunc;
     73          table->compfunc = compfunc;
     74          table->delkeyfunc = delkeyfunc;
     75          table->deldatafunc = deldatafunc;
     76          table->cb_arg = arg;
     77          table->size = start_size;
         [all …]
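The creation code above gives the table one lock plus one lock per bin, so lookups that hash to different buckets do not contend. A stripped-down sketch of that per-bucket locking pattern using pthreads; the structure and function names are invented, and unbound's lock_quick_* wrappers and entry types are not reproduced:

    #include <pthread.h>
    #include <stdlib.h>

    struct entry {
            unsigned         hash;
            struct entry    *next;
    };

    struct bin {
            pthread_mutex_t  lock;          /* protects only this bucket's chain */
            struct entry    *head;
    };

    struct table {
            pthread_mutex_t  lock;          /* protects table-wide bookkeeping */
            size_t           size;          /* number of bins, a power of two */
            struct bin      *bins;
    };

    static struct table *
    table_create(size_t size)
    {
            struct table *t = calloc(1, sizeof(*t));

            if (t == NULL)
                    return NULL;
            t->size = size;
            t->bins = calloc(size, sizeof(*t->bins));
            if (t->bins == NULL) {
                    free(t);
                    return NULL;
            }
            pthread_mutex_init(&t->lock, NULL);
            for (size_t i = 0; i < size; i++)
                    pthread_mutex_init(&t->bins[i].lock, NULL); /* one lock per bin */
            return t;
    }

    static struct entry *
    table_lookup(struct table *t, unsigned hash)
    {
            struct bin *b = &t->bins[hash & (t->size - 1)];
            struct entry *e;

            pthread_mutex_lock(&b->lock);   /* only this bucket is serialized */
            for (e = b->head; e != NULL; e = e->next)
                    if (e->hash == hash)
                            break;
            /* A real LRU hash would take a reference on e before unlocking. */
            pthread_mutex_unlock(&b->lock);
            return e;
    }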
|
/freebsd/usr.bin/lockf/ |
H A D | lockf.1 |
     30  .Nd execute a command while holding a file lock
     45  utility acquires an exclusive lock on a
     51  While holding the lock, it executes a
     59  releases the lock, and removes the
     64  .Bx Ns -style
     69  is not considered to constitute a lock.
     90  This can be used to lock inside a shell script.
     98  This will guarantee lock ordering, as well as implement
    100  with concurrent unlink, drop and re-acquire activity.
    104  option is not used, then no guarantees around lock ordering can be made.
         [all …]
|
/freebsd/sys/contrib/ck/include/ |
H A D | ck_elide.h |
      2  * Copyright 2013-2015 Samy Al Bahra.
     33  * non-TSO architectures with TM support.
     41  * skip_-prefixed counters represent the number of consecutive
     42  * elisions to forfeit. retry_-prefixed counters represent the
     45  *   _busy: Lock was busy
    100          st->n_fallback++;
    104          if (st->skip != 0)
    109                  st->skip = c->skip_busy;
    110                  *retry = c->retry_busy;
    114                  st->skip = c->skip_other;
         [all …]
|
/freebsd/crypto/openssl/crypto/ |
H A D | threads_win.c |
      2  * Copyright 2016-2023 The OpenSSL Project Authors. All Rights Reserved.
     20  * https://docs.microsoft.com/en-us/cpp/intrinsics/interlockedor-intrinsic-functions#requirements
     35      SRWLOCK lock;
     42      CRYPTO_RWLOCK *lock;
     46      if ((lock = OPENSSL_zalloc(sizeof(CRYPTO_win_rwlock))) == NULL)
     48      rwlock = lock;
     49      InitializeSRWLock(&rwlock->lock);
     52      if ((lock = OPENSSL_zalloc(sizeof(CRITICAL_SECTION))) == NULL) {
     59      if (!InitializeCriticalSectionAndSpinCount(lock, 0x400)) {
     60      OPENSSL_free(lock);
         [all …]
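threads_win.c is the Windows backend (SRWLOCK, or a CRITICAL_SECTION fallback) behind OpenSSL's portable CRYPTO_THREAD lock API. A minimal sketch of that public API from the caller's side; the shared counter is a made-up example:

    #include <openssl/crypto.h>

    static CRYPTO_RWLOCK *counter_lock;     /* hypothetical shared-counter lock */
    static long counter;

    int
    counter_init(void)
    {
            counter_lock = CRYPTO_THREAD_lock_new();
            return counter_lock != NULL;
    }

    int
    counter_bump(void)
    {
            if (!CRYPTO_THREAD_write_lock(counter_lock))
                    return 0;
            counter++;
            CRYPTO_THREAD_unlock(counter_lock);
            return 1;
    }

    long
    counter_read(void)
    {
            long v = 0;

            if (CRYPTO_THREAD_read_lock(counter_lock)) {
                    v = counter;
                    CRYPTO_THREAD_unlock(counter_lock);
            }
            return v;
    }

    void
    counter_fini(void)
    {
            CRYPTO_THREAD_lock_free(counter_lock);
    }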
|
/freebsd/contrib/llvm-project/compiler-rt/lib/builtins/ |
H A D | atomic.c |
      1  //===-- atomic.c - Implement support functions for atomic operations.------===//
      5  // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
      7  //===----------------------------------------------------------------------===//
     10  // arbitrary-sized memory locations. This design uses locks that should
     19  // To avoid needing a per-object lock, this code allocates an array of
     21  // For operations that must be atomic on two locations, the lower lock is
     24  //===----------------------------------------------------------------------===//
     32  // We use __builtin_mem* here to avoid dependencies on libc-provided headers.
     46  /// Number of locks. This allocates one page on 32-bit platforms, two on
     47  /// 64-bit. This can be specified externally if a different trade between
         [all …]
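The comments describe the usual libcall fallback: hash the object's address into a fixed array of locks so arbitrary-sized atomic operations need no per-object lock. A simplified, self-contained sketch of that idea with sequentially consistent C11 atomics; compiler-rt's real code uses its own lock type, platform primitives, and a different hash:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <string.h>

    #define NLOCKS 1024u                    /* power of two: roughly a page of locks */

    static _Atomic int locks[NLOCKS];       /* 0 = unlocked, 1 = locked */

    static void
    lock_acquire(_Atomic int *l)
    {
            while (atomic_exchange(l, 1) != 0)
                    ;                       /* spin until the previous value was 0 */
    }

    static void
    lock_release(_Atomic int *l)
    {
            atomic_store(l, 0);
    }

    /* Hash the object's address to pick one lock out of the array. */
    static _Atomic int *
    lock_for_pointer(const void *ptr)
    {
            uintptr_t h = (uintptr_t)ptr;

            h >>= 4;                        /* drop alignment bits */
            h ^= h >> 16;                   /* fold high bits into the index */
            return &locks[h & (NLOCKS - 1)];
    }

    /* Lock-protected load of an object of arbitrary size. */
    void
    generic_atomic_load(size_t size, const void *src, void *dest)
    {
            _Atomic int *l = lock_for_pointer(src);

            lock_acquire(l);
            memcpy(dest, src, size);
            lock_release(l);
    }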
|
/freebsd/sys/contrib/ck/include/spinlock/ |
H A D | anderson.h |
      2  * Copyright 2010-2015 Samy Al Bahra.
     39  * This is an implementation of Anderson's array-based queuing lock.
     52          char pad[CK_MD_CACHELINE - sizeof(unsigned int) * 3 - sizeof(void *)];
     58  ck_spinlock_anderson_init(struct ck_spinlock_anderson *lock,
     71          lock->slots = slots;
     72          lock->count = count;
     73          lock->mask = count - 1;
     74          lock->next = 0;
     78          * appropriate wrap-around value in the case of next slot counter
     81          if (count & (count - 1))
         [all …]
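Anderson's array-based queueing lock gives every waiter a private slot to spin on: a counter hands out slots, and releasing slot i opens slot (i + 1) mod count. A simplified C11 sketch of the idea, not the ck_spinlock_anderson API; it uses sequentially consistent atomics and omits the per-slot cache-line padding that the real structure's pad[] member provides:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define NSLOTS 64u      /* must be >= the maximum number of contending threads */

    struct array_lock {
            /* Each waiter spins on its own slot; "true" means "you may enter". */
            _Atomic bool     slot[NSLOTS];
            _Atomic unsigned next;          /* ticket counter handing out slots */
    };

    static void
    array_lock_init(struct array_lock *l)
    {
            for (unsigned i = 0; i < NSLOTS; i++)
                    atomic_init(&l->slot[i], i == 0);       /* slot 0 starts open */
            atomic_init(&l->next, 0);
    }

    /* Returns the slot index; the caller hands it back to release(). */
    static unsigned
    array_lock_acquire(struct array_lock *l)
    {
            unsigned me = atomic_fetch_add(&l->next, 1) % NSLOTS;

            while (!atomic_load(&l->slot[me]))
                    ;                               /* spin on a private location */
            atomic_store(&l->slot[me], false);      /* re-arm the slot for reuse */
            return me;
    }

    static void
    array_lock_release(struct array_lock *l, unsigned me)
    {
            /* Pass ownership to whoever is (or will be) waiting on the next slot. */
            atomic_store(&l->slot[(me + 1) % NSLOTS], true);
    }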
|