Lines Matching +full:lock +full:- +full:state
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
29 * Machine independent bits of reader/writer lock implementation.
41 #include <sys/lock.h>
59 PMC_SOFT_DECLARE( , , lock, failed);
63 * Return the rwlock address when the lock cookie address is provided.
71 static void db_show_rwlock(const struct lock_object *lock);
73 static void assert_rw(const struct lock_object *lock, int what);
74 static void lock_rw(struct lock_object *lock, uintptr_t how);
75 static int trylock_rw(struct lock_object *lock, uintptr_t how);
77 static int owner_rw(const struct lock_object *lock, struct thread **owner);
79 static uintptr_t unlock_rw(struct lock_object *lock);
130 * Return a pointer to the owning thread if the lock is write-locked or
131 * NULL if the lock is unlocked or read-locked.
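A minimal sketch of that decode, assuming the usual FreeBSD encoding in which RW_LOCK_READ marks a read-locked or unlocked word and RW_OWNER() strips the flag bits (names as in sys/sys/rwlock.h; treat the exact masks as an assumption):

    static struct thread *
    wowner_sketch(const struct rwlock *rw)
    {
            uintptr_t v = rw->rw_lock;

            /* Read-locked or unlocked: no single owning thread. */
            if (v & RW_LOCK_READ)
                    return (NULL);
            /* Otherwise the word holds the owner pointer plus flag bits. */
            return ((struct thread *)RW_OWNER(v));
    }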
144 #define rw_recursed(rw) ((rw)->rw_recurse != 0)
147 * Return true if curthread holds the lock.
152 * Return a pointer to the owning thread for this lock that should receive
153 * any priority lent by threads that block on this lock. Currently this
163 assert_rw(const struct lock_object *lock, int what) in assert_rw() argument
166 rw_assert((const struct rwlock *)lock, what); in assert_rw()
170 lock_rw(struct lock_object *lock, uintptr_t how) in lock_rw() argument
174 rw = (struct rwlock *)lock; in lock_rw()
182 trylock_rw(struct lock_object *lock, uintptr_t how) in trylock_rw() argument
186 rw = (struct rwlock *)lock; in trylock_rw()
194 unlock_rw(struct lock_object *lock) in unlock_rw() argument
198 rw = (struct rwlock *)lock; in unlock_rw()
200 if (rw->rw_lock & RW_LOCK_READ) { in unlock_rw()
211 owner_rw(const struct lock_object *lock, struct thread **owner) in owner_rw() argument
213 const struct rwlock *rw = (const struct rwlock *)lock; in owner_rw()
214 uintptr_t x = rw->rw_lock; in owner_rw()
232 ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock, in _rw_init_flags()
234 &rw->rw_lock)); in _rw_init_flags()
250 lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags); in _rw_init_flags()
251 rw->rw_lock = RW_UNLOCKED; in _rw_init_flags()
252 rw->rw_recurse = 0; in _rw_init_flags()
262 KASSERT(rw->rw_lock == RW_UNLOCKED, ("rw lock %p not unlocked", rw)); in _rw_destroy()
263 KASSERT(rw->rw_recurse == 0, ("rw lock %p still recursed", rw)); in _rw_destroy()
264 rw->rw_lock = RW_DESTROYED; in _rw_destroy()
265 lock_destroy(&rw->lock_object); in _rw_destroy()
274 rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc, in rw_sysinit()
275 args->ra_flags); in rw_sysinit()
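For orientation, a minimal usage sketch of the public rwlock(9) KPI that the init/destroy paths above implement; these are the standard FreeBSD kernel entry points, with all module boilerplate omitted:

    struct rwlock data_lock;

    rw_init(&data_lock, "data lock");   /* must precede any other use */

    rw_wlock(&data_lock);               /* exclusive: one writer */
    /* ... modify the shared data ... */
    rw_wunlock(&data_lock);

    rw_rlock(&data_lock);               /* shared: many readers */
    /* ... read the shared data ... */
    rw_runlock(&data_lock);

    rw_destroy(&data_lock);             /* lock must be unlocked here */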
297 KASSERT(rw->rw_lock != RW_DESTROYED, in _rw_wlock_cookie()
299 WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, file, in _rw_wlock_cookie()
309 LOCK_LOG_LOCK("WLOCK", &rw->lock_object, 0, rw->rw_recurse, file, line); in _rw_wlock_cookie()
310 WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line); in _rw_wlock_cookie()
330 KASSERT(rw->rw_lock != RW_DESTROYED, in __rw_try_wlock_int()
337 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid)) in __rw_try_wlock_int()
341 if (v == tid && (rw->lock_object.lo_flags & LO_RECURSABLE)) { in __rw_try_wlock_int()
342 rw->rw_recurse++; in __rw_try_wlock_int()
343 atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED); in __rw_try_wlock_int()
350 LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line); in __rw_try_wlock_int()
352 WITNESS_LOCK(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, in __rw_try_wlock_int()
378 KASSERT(rw->rw_lock != RW_DESTROYED, in _rw_wunlock_cookie()
381 WITNESS_UNLOCK(&rw->lock_object, LOP_EXCLUSIVE, file, line); in _rw_wunlock_cookie()
382 LOCK_LOG_LOCK("WUNLOCK", &rw->lock_object, 0, rw->rw_recurse, file, in _rw_wunlock_cookie()
395 * Determines whether a new reader can acquire a lock. Succeeds if the
396 * reader already owns a read lock and the lock is locked for read to
397 * prevent deadlock from reader recursion. Also succeeds if the lock
408 if (!fp && td->td_rw_rlocks && (v & RW_LOCK_READ)) in __rw_can_read()
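A hedged restatement of that admission test (the in-tree __rw_can_read() also folds in a writer-spinner bit; the mask below is deliberately simplified):

    static bool
    can_read_sketch(struct thread *td, uintptr_t v, bool fp)
    {
            /* Unlocked or read-locked with no write waiters: admit. */
            if ((v & (RW_LOCK_READ | RW_LOCK_WRITE_WAITERS)) == RW_LOCK_READ)
                    return (true);
            /* Recursive reader on a read-locked lock: admit, avoiding deadlock. */
            if (!fp && td->td_rw_rlocks && (v & RW_LOCK_READ))
                    return (true);
            return (false);
    }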
420 * lock, then try to bump up the count of read locks. Note in __rw_rlock_try()
421 * that we have to preserve the current state of the in __rw_rlock_try()
423 * read lock, then rw_lock must have changed, so restart in __rw_rlock_try()
425 * completely unlocked rwlock since such a lock is encoded in __rw_rlock_try()
426 * as a read lock with no waiters. in __rw_rlock_try()
429 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, vp, in __rw_rlock_try()
431 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_rlock_try()
433 "%s: %p succeed %p -> %p", __func__, in __rw_rlock_try()
436 td->td_rw_rlocks++; in __rw_rlock_try()
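The fast path described above boils down to one fcmpset loop. A sketch, assuming the usual encoding where RW_UNLOCKED is a read lock with zero readers, so the same increment works on an unlocked word (admission checks elided):

    uintptr_t v = rw->rw_lock;

    while (v & RW_LOCK_READ) {          /* read-locked or unlocked */
            /* Bump the reader count while preserving the flag bits. */
            if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, v + RW_ONE_READER)) {
                    curthread->td_rw_rlocks++;
                    break;              /* acquired */
            }
            /* fcmpset reloaded v on failure; re-test and retry. */
    }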
464 uintptr_t state = 0; in __rw_rlock_hard() local
475 all_time -= lockstat_nsecs(&rw->lock_object); in __rw_rlock_hard()
477 state = v; in __rw_rlock_hard()
493 PMC_SOFT_CALL( , , lock, failed); in __rw_rlock_hard()
495 lock_profile_obtain_lock_failed(&rw->lock_object, false, in __rw_rlock_hard()
498 THREAD_CONTENDS_ON_LOCK(&rw->lock_object); in __rw_rlock_hard()
510 * the owner stops running or the state of the lock in __rw_rlock_hard()
516 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_rlock_hard()
522 "lockname:\"%s\"", rw->lock_object.lo_name); in __rw_rlock_hard()
543 rw->lock_object.lo_name); in __rw_rlock_hard()
557 lda.spin_cnt += rowner_loops - i; in __rw_rlock_hard()
569 * has a write lock or there are write waiters present, in __rw_rlock_hard()
570 * acquire the turnstile lock so we can begin the process in __rw_rlock_hard()
573 ts = turnstile_trywait(&rw->lock_object); in __rw_rlock_hard()
576 * The lock might have been released while we spun, so in __rw_rlock_hard()
577 * recheck its state and restart the loop if needed. in __rw_rlock_hard()
591 * The current lock owner might have started executing in __rw_rlock_hard()
592 * on another CPU (or the lock could have changed in __rw_rlock_hard()
594 * chain lock. If so, drop the turnstile lock and try in __rw_rlock_hard()
606 * The lock is held in write mode or it already has waiters. in __rw_rlock_hard()
614 * lock and restart the loop. in __rw_rlock_hard()
617 if (!atomic_fcmpset_ptr(&rw->rw_lock, &v, in __rw_rlock_hard()
620 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_rlock_hard()
626 * We were unable to acquire the lock and the read waiters in __rw_rlock_hard()
629 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_rlock_hard()
633 sleep_time -= lockstat_nsecs(&rw->lock_object); in __rw_rlock_hard()
638 sleep_time += lockstat_nsecs(&rw->lock_object); in __rw_rlock_hard()
641 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_rlock_hard()
646 THREAD_CONTENTION_DONE(&rw->lock_object); in __rw_rlock_hard()
652 all_time += lockstat_nsecs(&rw->lock_object); in __rw_rlock_hard()
655 LOCKSTAT_READER, (state & RW_LOCK_READ) == 0, in __rw_rlock_hard()
656 (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state)); in __rw_rlock_hard()
660 LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time, in __rw_rlock_hard()
661 LOCKSTAT_READER, (state & RW_LOCK_READ) == 0, in __rw_rlock_hard()
662 (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state)); in __rw_rlock_hard()
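The blocking step those turnstile comments describe follows the usual pattern: publish yourself as a waiter in the lock word, then sleep on the shared queue. A sketch under the same assumptions (turnstile KPI names as in sys/turnstile.h; the enclosing retry loop and error handling are elided):

    /* Record ourselves as a read waiter before sleeping. */
    if (!(v & RW_LOCK_READ_WAITERS)) {
            if (!atomic_fcmpset_ptr(&rw->rw_lock, &v,
                v | RW_LOCK_READ_WAITERS)) {
                    turnstile_cancel(ts);       /* lock word changed; retry */
                    continue;
            }
    }
    turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);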
686 KASSERT(rw->rw_lock != RW_DESTROYED, in __rw_rlock_int()
691 WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL); in __rw_rlock_int()
698 lock_profile_obtain_lock_success(&rw->lock_object, false, 0, 0, in __rw_rlock_int()
701 LOCK_LOG_LOCK("RLOCK", &rw->lock_object, 0, 0, file, line); in __rw_rlock_int()
702 WITNESS_LOCK(&rw->lock_object, 0, file, line); in __rw_rlock_int()
727 x = rw->rw_lock; in __rw_try_rlock_int()
729 KASSERT(rw->rw_lock != RW_DESTROYED, in __rw_try_rlock_int()
734 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &x, x + RW_ONE_READER)) { in __rw_try_rlock_int()
735 LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 1, file, in __rw_try_rlock_int()
737 WITNESS_LOCK(&rw->lock_object, LOP_TRYLOCK, file, line); in __rw_try_rlock_int()
741 curthread->td_rw_rlocks++; in __rw_try_rlock_int()
746 LOCK_LOG_TRY("RLOCK", &rw->lock_object, 0, 0, file, line); in __rw_try_rlock_int()
765 if (atomic_fcmpset_rel_ptr(&rw->rw_lock, vp, in __rw_runlock_try()
766 *vp - RW_ONE_READER)) { in __rw_runlock_try()
767 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_runlock_try()
769 "%s: %p succeeded %p -> %p", in __rw_runlock_try()
771 (void *)(*vp - RW_ONE_READER)); in __rw_runlock_try()
772 td->td_rw_rlocks--; in __rw_runlock_try()
797 * last reader, so grab the turnstile lock. in __rw_runlock_hard()
799 turnstile_chain_lock(&rw->lock_object); in __rw_runlock_hard()
808 * Try to drop our lock leaving the lock in an unlocked in __rw_runlock_hard()
809 * state. in __rw_runlock_hard()
811 * If you wanted to do explicit lock handoff you'd have to in __rw_runlock_hard()
814 * priority thread blocks on the write lock before the in __rw_runlock_hard()
816 * "steal" the lock. For now it's a lot simpler to just in __rw_runlock_hard()
820 * acquired a read lock, so drop the turnstile lock and in __rw_runlock_hard()
830 if (!atomic_fcmpset_rel_ptr(&rw->rw_lock, &v, setv)) in __rw_runlock_hard()
832 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_runlock_hard()
837 * Ok. The lock is released and all that's left is to in __rw_runlock_hard()
838 * wake up the waiters. Note that the lock might not be in __rw_runlock_hard()
840 * block again if they run before the new lock holder(s) in __rw_runlock_hard()
841 * release the lock. in __rw_runlock_hard()
843 ts = turnstile_lookup(&rw->lock_object); in __rw_runlock_hard()
847 td->td_rw_rlocks--; in __rw_runlock_hard()
850 turnstile_chain_unlock(&rw->lock_object); in __rw_runlock_hard()
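A sketch of the final-reader release just shown: drop to RW_UNLOCKED with a release CAS, preferring write waiters and leaving any read waiters recorded so they queue behind the new writer (queue names from the turnstile KPI; the retry loop around the CAS is elided):

    uintptr_t setv = RW_UNLOCKED;
    int queue = TS_SHARED_QUEUE;

    if (v & RW_LOCK_WRITE_WAITERS) {
            queue = TS_EXCLUSIVE_QUEUE;         /* wake writers first */
            setv |= (v & RW_LOCK_READ_WAITERS);
    }
    if (atomic_fcmpset_rel_ptr(&rw->rw_lock, &v, setv))
            turnstile_broadcast(ts, queue);     /* then wake that queue */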
861 KASSERT(rw->rw_lock != RW_DESTROYED, in _rw_runlock_cookie_int()
863 __rw_assert(&rw->rw_lock, RA_RLOCKED, file, line); in _rw_runlock_cookie_int()
864 WITNESS_UNLOCK(&rw->lock_object, 0, file, line); in _rw_runlock_cookie_int()
865 LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line); in _rw_runlock_cookie_int()
874 lock_profile_release_lock(&rw->lock_object, false); in _rw_runlock_cookie_int()
898 (*extra_work)--; in rw_drop_critical()
906 * This function is called when we are unable to obtain a write lock on the
908 * read or write lock.
935 uintptr_t state = 0; in __rw_wlock_hard() local
953 all_time -= lockstat_nsecs(&rw->lock_object); in __rw_wlock_hard()
955 state = v; in __rw_wlock_hard()
969 KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE, in __rw_wlock_hard()
970 ("%s: recursing but non-recursive rw %p @ %s:%d\n", in __rw_wlock_hard()
972 rw->rw_recurse++; in __rw_wlock_hard()
973 atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED); in __rw_wlock_hard()
974 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_wlock_hard()
979 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_wlock_hard()
980 CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__, in __rw_wlock_hard()
981 rw->lock_object.lo_name, (void *)rw->rw_lock, file, line); in __rw_wlock_hard()
990 PMC_SOFT_CALL( , , lock, failed); in __rw_wlock_hard()
992 lock_profile_obtain_lock_failed(&rw->lock_object, false, in __rw_wlock_hard()
995 THREAD_CONTENDS_ON_LOCK(&rw->lock_object); in __rw_wlock_hard()
1009 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid)) in __rw_wlock_hard()
1015 * If the lock is write locked and the owner is in __rw_wlock_hard()
1017 * running or the state of the lock changes. in __rw_wlock_hard()
1025 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_wlock_hard()
1030 rw->lock_object.lo_name); in __rw_wlock_hard()
1049 if (!atomic_fcmpset_ptr(&rw->rw_lock, &v, in __rw_wlock_hard()
1053 extra_work--; in __rw_wlock_hard()
1060 rw->lock_object.lo_name); in __rw_wlock_hard()
1083 ts = turnstile_trywait(&rw->lock_object); in __rw_wlock_hard()
1090 * The current lock owner might have started executing in __rw_wlock_hard()
1091 * on another CPU (or the lock could have changed in __rw_wlock_hard()
1093 * chain lock. If so, drop the turnstile lock and try in __rw_wlock_hard()
1110 * If the lock was released without maintaining any pending in __rw_wlock_hard()
1112 * If a pending waiters queue is present, claim the lock in __rw_wlock_hard()
1118 if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid | setv)) { in __rw_wlock_hard()
1134 if (!atomic_fcmpset_ptr(&rw->rw_lock, &v, setv)) in __rw_wlock_hard()
1139 extra_work--; in __rw_wlock_hard()
1148 if (!atomic_fcmpset_ptr(&rw->rw_lock, &v, in __rw_wlock_hard()
1151 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_wlock_hard()
1159 * We were unable to acquire the lock and the write waiters in __rw_wlock_hard()
1162 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_wlock_hard()
1166 sleep_time -= lockstat_nsecs(&rw->lock_object); in __rw_wlock_hard()
1171 sleep_time += lockstat_nsecs(&rw->lock_object); in __rw_wlock_hard()
1174 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_wlock_hard()
1182 THREAD_CONTENTION_DONE(&rw->lock_object); in __rw_wlock_hard()
1194 all_time += lockstat_nsecs(&rw->lock_object); in __rw_wlock_hard()
1197 LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0, in __rw_wlock_hard()
1198 (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state)); in __rw_wlock_hard()
1202 LOCKSTAT_RECORD4(rw__spin, rw, all_time - sleep_time, in __rw_wlock_hard()
1203 LOCKSTAT_WRITER, (state & RW_LOCK_READ) == 0, in __rw_wlock_hard()
1204 (state & RW_LOCK_READ) == 0 ? 0 : RW_READERS(state)); in __rw_wlock_hard()
1213 * a write lock failed. The latter means that the lock is recursed or one of
1215 * on this lock.
1234 if (--(rw->rw_recurse) == 0) in __rw_wunlock_hard()
1235 atomic_clear_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED); in __rw_wunlock_hard()
1236 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_wunlock_hard()
1245 KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS), in __rw_wunlock_hard()
1248 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_wunlock_hard()
1251 turnstile_chain_lock(&rw->lock_object); in __rw_wunlock_hard()
1258 * have waiters on both queues, we need to preserve the state of in __rw_wunlock_hard()
1264 * new writer comes in before a reader it will claim the lock up in __rw_wunlock_hard()
1267 * of waiters or doing some complicated lock handoff gymnastics. in __rw_wunlock_hard()
1276 atomic_store_rel_ptr(&rw->rw_lock, setv); in __rw_wunlock_hard()
1279 if (LOCK_LOG_TEST(&rw->lock_object, 0)) in __rw_wunlock_hard()
1283 ts = turnstile_lookup(&rw->lock_object); in __rw_wunlock_hard()
1287 turnstile_chain_unlock(&rw->lock_object); in __rw_wunlock_hard()
1291 * Attempt to do a non-blocking upgrade from a read lock to a write
1292 * lock. This will only succeed if this thread holds a single read
1293 * lock. Returns true if the upgrade succeeded and false otherwise.
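In the common no-waiters case, the upgrade reduces to a single CAS from the one-reader encoding to this thread's ID. A sketch of just that case (the waiter-handling path, shown further down, additionally involves the turnstile):

    uintptr_t tid = (uintptr_t)curthread;
    uintptr_t v = RW_READERS_LOCK(1);   /* exactly one reader, no waiters */

    if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid)) {
            /* Now write-locked by curthread. */
            curthread->td_rw_rlocks--;
    }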
1305 KASSERT(rw->rw_lock != RW_DESTROYED, in __rw_try_upgrade_int()
1308 __rw_assert(&rw->rw_lock, RA_RLOCKED, file, line); in __rw_try_upgrade_int()
1312 * are any write waiters, then we will have to lock the in __rw_try_upgrade_int()
1324 success = atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid); in __rw_try_upgrade_int()
1331 * Ok, we think we have waiters, so lock the turnstile. in __rw_try_upgrade_int()
1333 ts = turnstile_trywait(&rw->lock_object); in __rw_try_upgrade_int()
1342 * we honor the current state of the waiters flags. in __rw_try_upgrade_int()
1343 * If we obtain the lock with the flags set, then claim in __rw_try_upgrade_int()
1347 success = atomic_fcmpset_ptr(&rw->rw_lock, &v, setv); in __rw_try_upgrade_int()
1357 LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line); in __rw_try_upgrade_int()
1359 curthread->td_rw_rlocks--; in __rw_try_upgrade_int()
1360 WITNESS_UPGRADE(&rw->lock_object, LOP_EXCLUSIVE | LOP_TRYLOCK, in __rw_try_upgrade_int()
1377 * Downgrade a write lock into a single read lock.
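The no-waiters downgrade is the mirror image: one release CAS from the owner's thread ID back to the one-reader encoding. A sketch of that case (the waiter path below must also consult the turnstile):

    uintptr_t tid = (uintptr_t)curthread;

    if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
            curthread->td_rw_rlocks++;  /* now hold a single read lock */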
1389 KASSERT(rw->rw_lock != RW_DESTROYED, in __rw_downgrade_int()
1391 __rw_assert(&rw->rw_lock, RA_WLOCKED | RA_NOTRECURSED, file, line); in __rw_downgrade_int()
1394 panic("downgrade of a recursed lock"); in __rw_downgrade_int()
1397 WITNESS_DOWNGRADE(&rw->lock_object, 0, file, line); in __rw_downgrade_int()
1402 * lock the turnstile and "disown" the lock. in __rw_downgrade_int()
1405 if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1))) in __rw_downgrade_int()
1409 * Ok, we think we have waiters, so lock the turnstile so we can in __rw_downgrade_int()
1412 turnstile_chain_lock(&rw->lock_object); in __rw_downgrade_int()
1413 v = rw->rw_lock & RW_LOCK_WAITERS; in __rw_downgrade_int()
1419 * Downgrade from a write lock while preserving waiters flag in __rw_downgrade_int()
1422 ts = turnstile_lookup(&rw->lock_object); in __rw_downgrade_int()
1426 atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v); in __rw_downgrade_int()
1429 * won't be able to acquire the lock anyway. in __rw_downgrade_int()
1436 turnstile_chain_unlock(&rw->lock_object); in __rw_downgrade_int()
1438 curthread->td_rw_rlocks++; in __rw_downgrade_int()
1439 LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line); in __rw_downgrade_int()
1458 * In the non-WITNESS case, rw_assert() can only detect that at least
1480 witness_assert(&rw->lock_object, what, file, line); in __rw_assert()
1483 * If some other thread has a write lock or we have one in __rw_assert()
1484 * and are asserting a read lock, fail. Also, if no one in __rw_assert()
1485 * has a lock at all, fail. in __rw_assert()
1487 if (rw->rw_lock == RW_UNLOCKED || in __rw_assert()
1488 (!(rw->rw_lock & RW_LOCK_READ) && (what & RA_RLOCKED || in __rw_assert()
1490 panic("Lock %s not %slocked @ %s:%d\n", in __rw_assert()
1491 rw->lock_object.lo_name, (what & RA_RLOCKED) ? in __rw_assert()
1494 if (!(rw->rw_lock & RW_LOCK_READ) && !(what & RA_RLOCKED)) { in __rw_assert()
1497 panic("Lock %s recursed @ %s:%d\n", in __rw_assert()
1498 rw->lock_object.lo_name, file, in __rw_assert()
1501 panic("Lock %s not recursed @ %s:%d\n", in __rw_assert()
1502 rw->lock_object.lo_name, file, line); in __rw_assert()
1510 panic("Lock %s not exclusively locked @ %s:%d\n", in __rw_assert()
1511 rw->lock_object.lo_name, file, line); in __rw_assert()
1514 panic("Lock %s recursed @ %s:%d\n", in __rw_assert()
1515 rw->lock_object.lo_name, file, line); in __rw_assert()
1517 panic("Lock %s not recursed @ %s:%d\n", in __rw_assert()
1518 rw->lock_object.lo_name, file, line); in __rw_assert()
1522 witness_assert(&rw->lock_object, what, file, line); in __rw_assert()
1525 * If we hold a write lock fail. We can't reliably check in __rw_assert()
1526 * to see if we hold a read lock or not. in __rw_assert()
1529 panic("Lock %s exclusively locked @ %s:%d\n", in __rw_assert()
1530 rw->lock_object.lo_name, file, line); in __rw_assert()
1534 panic("Unknown rw lock assertion: %d @ %s:%d", what, file, in __rw_assert()
1542 db_show_rwlock(const struct lock_object *lock) in db_show_rwlock() argument
1547 rw = (const struct rwlock *)lock; in db_show_rwlock()
1549 db_printf(" state: "); in db_show_rwlock()
1550 if (rw->rw_lock == RW_UNLOCKED) in db_show_rwlock()
1552 else if (rw->rw_lock == RW_DESTROYED) { in db_show_rwlock()
1555 } else if (rw->rw_lock & RW_LOCK_READ) in db_show_rwlock()
1557 (uintmax_t)(RW_READERS(rw->rw_lock))); in db_show_rwlock()
1561 td->td_tid, td->td_proc->p_pid, td->td_name); in db_show_rwlock()
1563 db_printf(" recursed: %u\n", rw->rw_recurse); in db_show_rwlock()
1566 switch (rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS)) { in db_show_rwlock()