Lines Matching +full:usecase +full:-specific (sys/kern/kern_rmlock.c)

1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
15 * 3. Neither the name of the author nor the names of any co-contributors
17 * without specific prior written permission.
67 (LIST_FIRST(&(rm)->rm_activeReaders) == RM_DESTROYED)
161 for (queue = pc->pc_rm_queue.rmq_next; in unlock_rm()
162 queue != &pc->pc_rm_queue; queue = queue->rmq_next) { in unlock_rm()
164 if ((tracker->rmp_rmlock == rm) && in unlock_rm()
165 (tracker->rmp_thread == td)) { in unlock_rm()
171 ("rm_priotracker is non-NULL when lock held in read mode")); in unlock_rm()
186 lc = LOCK_CLASS(&rm->rm_wlock_object); in owner_rm()
187 return (lc->lc_owner(&rm->rm_wlock_object, owner)); in owner_rm()
196 * Add or remove tracker from per-cpu list.
198 * The per-cpu list can be traversed at any time in forward direction from an
207 tracker->rmp_cpuQueue.rmq_prev = &pc->pc_rm_queue; in rm_tracker_add()
208 next = pc->pc_rm_queue.rmq_next; in rm_tracker_add()
209 tracker->rmp_cpuQueue.rmq_next = next; in rm_tracker_add()
212 next->rmq_prev = &tracker->rmp_cpuQueue; in rm_tracker_add()
215 pc->pc_rm_queue.rmq_next = &tracker->rmp_cpuQueue; in rm_tracker_add()
231 for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue; in rm_trackers_present()
232 queue = queue->rmq_next) { in rm_trackers_present()
234 if ((tracker->rmp_rmlock == rm) && (tracker->rmp_thread == td)) in rm_trackers_present()
245 next = tracker->rmp_cpuQueue.rmq_next; in rm_tracker_remove()
246 prev = tracker->rmp_cpuQueue.rmq_prev; in rm_tracker_remove()
249 next->rmq_prev = prev; in rm_tracker_remove()
252 prev->rmq_next = next; in rm_tracker_remove()
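Editor's note: the rm_tracker_add() and rm_tracker_remove() fragments above splice a reader's tracker into a per-CPU queue headed by pc_rm_queue: a doubly linked list with a sentinel head, written so that the head's forward pointer is published last and an interrupt on the local CPU can always walk the list in the forward direction. The following is a minimal user-space sketch of that splice logic under simplified types; the rmq, rmq_insert_head, and rmq_remove names are illustrative stand-ins, not the kernel's struct rm_queue API.

/*
 * Sketch of the per-CPU tracker queue used by rm_tracker_add() and
 * rm_tracker_remove(): a doubly linked list with a sentinel head,
 * head insertion, and O(1) unlinking.  Simplified stand-in types.
 */
#include <assert.h>

struct rmq {
	struct rmq *rmq_next;
	struct rmq *rmq_prev;
};

/* Insert "node" right after the sentinel "head" (mirrors rm_tracker_add()). */
static void
rmq_insert_head(struct rmq *head, struct rmq *node)
{
	struct rmq *next = head->rmq_next;

	node->rmq_prev = head;
	node->rmq_next = next;
	next->rmq_prev = node;		/* old first element points back at us */
	head->rmq_next = node;		/* publish via the head pointer last */
}

/* Unlink "node" from its list (mirrors rm_tracker_remove()). */
static void
rmq_remove(struct rmq *node)
{

	node->rmq_next->rmq_prev = node->rmq_prev;
	node->rmq_prev->rmq_next = node->rmq_next;
}

int
main(void)
{
	struct rmq head = { &head, &head };	/* empty sentinel queue */
	struct rmq a, b;

	rmq_insert_head(&head, &a);
	rmq_insert_head(&head, &b);		/* queue is now head -> b -> a */
	assert(head.rmq_next == &b && b.rmq_next == &a);
	rmq_remove(&b);
	assert(head.rmq_next == &a);
	rmq_remove(&a);
	assert(head.rmq_next == &head);
	return (0);
}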
264 for (queue = pc->pc_rm_queue.rmq_next; queue != &pc->pc_rm_queue; in rm_cleanIPI()
265 queue = queue->rmq_next) { in rm_cleanIPI()
267 if (tracker->rmp_rmlock == rm && tracker->rmp_flags == 0) { in rm_cleanIPI()
268 tracker->rmp_flags = RMPF_ONQUEUE; in rm_cleanIPI()
270 LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker, in rm_cleanIPI()
292 rm->rm_writecpus = all_cpus; in rm_init_flags()
293 LIST_INIT(&rm->rm_activeReaders); in rm_init_flags()
298 sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", in rm_init_flags()
303 mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", in rm_init_flags()
306 lock_init(&rm->lock_object, lc, name, NULL, liflags); in rm_init_flags()
321 LIST_FIRST(&rm->rm_activeReaders) = RM_DESTROYED; in rm_destroy()
322 if (rm->lock_object.lo_flags & LO_SLEEPABLE) in rm_destroy()
323 sx_destroy(&rm->rm_lock_sx); in rm_destroy()
325 mtx_destroy(&rm->rm_lock_mtx); in rm_destroy()
326 lock_destroy(&rm->lock_object); in rm_destroy()
333 if (rm->lock_object.lo_flags & LO_SLEEPABLE) in rm_wowned()
334 return (sx_xlocked(&rm->rm_lock_sx)); in rm_wowned()
336 return (mtx_owned(&rm->rm_lock_mtx)); in rm_wowned()
345 rm_init_flags(args->ra_rm, args->ra_desc, args->ra_flags); in rm_sysinit()
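Editor's note: the fragments above cover the lifecycle half of the lock (rm_init_flags(), rm_destroy(), rm_wowned(), rm_sysinit()); the read and write paths follow. For orientation, here is a minimal consumer sketch using the rmlock(9) interface these functions implement: rm_init_flags(), rm_rlock()/rm_runlock() with a stack-allocated rm_priotracker, rm_wlock()/rm_wunlock(), and rm_destroy(). The example_* names and the guarded counter are hypothetical.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

/* Hypothetical read-mostly state guarded by an rmlock. */
static struct rmlock example_rm;
static u_int example_counter;

static void
example_setup(void)
{
	/* RM_RECURSE sets LO_RECURSABLE, which the read slow path checks. */
	rm_init_flags(&example_rm, "example_rm", RM_RECURSE);
}

static u_int
example_read(void)
{
	struct rm_priotracker tracker;	/* lives on the reader's stack */
	u_int v;

	rm_rlock(&example_rm, &tracker);
	v = example_counter;
	rm_runlock(&example_rm, &tracker);
	return (v);
}

static void
example_write(u_int v)
{

	rm_wlock(&example_rm);		/* takes the backing mutex (or sx) */
	example_counter = v;
	rm_wunlock(&example_rm);
}

static void
example_teardown(void)
{

	rm_destroy(&example_rm);
}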
357 if (!CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)) { in _rm_rlock_hard()
362 /* Remove our tracker from the per-cpu list. */ in _rm_rlock_hard()
370 if (tracker->rmp_flags) { in _rm_rlock_hard()
371 /* Just add back tracker - we hold the lock. */ in _rm_rlock_hard()
381 if ((rm->lock_object.lo_flags & LO_RECURSABLE) != 0) { in _rm_rlock_hard()
384 * for this lock on the per-cpu queue. in _rm_rlock_hard()
388 LIST_INSERT_HEAD(&rm->rm_activeReaders, tracker, in _rm_rlock_hard()
390 tracker->rmp_flags = RMPF_ONQUEUE; in _rm_rlock_hard()
402 if (rm->lock_object.lo_flags & LO_SLEEPABLE) { in _rm_rlock_hard()
403 if (!sx_try_xlock(&rm->rm_lock_sx)) in _rm_rlock_hard()
406 if (!mtx_trylock(&rm->rm_lock_mtx)) in _rm_rlock_hard()
410 if (rm->lock_object.lo_flags & LO_SLEEPABLE) { in _rm_rlock_hard()
412 sx_xlock(&rm->rm_lock_sx); in _rm_rlock_hard()
415 mtx_lock(&rm->rm_lock_mtx); in _rm_rlock_hard()
420 CPU_CLR(pc->pc_cpuid, &rm->rm_writecpus); in _rm_rlock_hard()
425 if (rm->lock_object.lo_flags & LO_SLEEPABLE) in _rm_rlock_hard()
426 sx_xunlock(&rm->rm_lock_sx); in _rm_rlock_hard()
428 mtx_unlock(&rm->rm_lock_mtx); in _rm_rlock_hard()
442 tracker->rmp_flags = 0; in _rm_rlock()
443 tracker->rmp_thread = td; in _rm_rlock()
444 tracker->rmp_rmlock = rm; in _rm_rlock()
446 if (rm->lock_object.lo_flags & LO_SLEEPABLE) in _rm_rlock()
449 td->td_critnest++; /* critical_enter(); */ in _rm_rlock()
452 pc = cpuid_to_pcpu[td->td_oncpu]; in _rm_rlock()
457 td->td_critnest--; in _rm_rlock()
463 if (__predict_true(0 == (td->td_owepreempt | in _rm_rlock()
464 CPU_ISSET(pc->pc_cpuid, &rm->rm_writecpus)))) in _rm_rlock()
475 if (td->td_owepreempt) { in _rm_unlock_hard()
476 td->td_critnest++; in _rm_unlock_hard()
480 if (!tracker->rmp_flags) in _rm_unlock_hard()
486 if (tracker->rmp_flags & RMPF_SIGNAL) { in _rm_unlock_hard()
490 rm = tracker->rmp_rmlock; in _rm_unlock_hard()
492 turnstile_chain_lock(&rm->lock_object); in _rm_unlock_hard()
495 ts = turnstile_lookup(&rm->lock_object); in _rm_unlock_hard()
499 turnstile_chain_unlock(&rm->lock_object); in _rm_unlock_hard()
508 struct thread *td = tracker->rmp_thread; in _rm_runlock()
513 td->td_critnest++; /* critical_enter(); */ in _rm_runlock()
516 pc = cpuid_to_pcpu[td->td_oncpu]; in _rm_runlock()
520 td->td_critnest--; in _rm_runlock()
523 if (rm->lock_object.lo_flags & LO_SLEEPABLE) in _rm_runlock()
526 if (__predict_true(0 == (td->td_owepreempt | tracker->rmp_flags))) in _rm_runlock()
542 if (rm->lock_object.lo_flags & LO_SLEEPABLE) in _rm_wlock()
543 sx_xlock(&rm->rm_lock_sx); in _rm_wlock()
545 mtx_lock(&rm->rm_lock_mtx); in _rm_wlock()
547 if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) { in _rm_wlock()
550 CPU_ANDNOT(&readcpus, &readcpus, &rm->rm_writecpus); in _rm_wlock()
551 rm->rm_writecpus = all_cpus; in _rm_wlock()
554 * Assumes rm->rm_writecpus update is visible on other CPUs in _rm_wlock()
569 while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) { in _rm_wlock()
570 ts = turnstile_trywait(&rm->lock_object); in _rm_wlock()
571 prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL; in _rm_wlock()
573 turnstile_wait(ts, prio->rmp_thread, in _rm_wlock()
585 if (rm->lock_object.lo_flags & LO_SLEEPABLE) in _rm_wunlock()
586 sx_xunlock(&rm->rm_lock_sx); in _rm_wunlock()
588 mtx_unlock(&rm->rm_lock_mtx); in _rm_wunlock()
607 WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE, in _rm_wlock_debug()
612 LOCK_LOG_LOCK("RMWLOCK", &rm->lock_object, 0, 0, file, line); in _rm_wlock_debug()
613 WITNESS_LOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line); in _rm_wlock_debug()
627 WITNESS_UNLOCK(&rm->lock_object, LOP_EXCLUSIVE, file, line); in _rm_wunlock_debug()
628 LOCK_LOG_LOCK("RMWUNLOCK", &rm->lock_object, 0, 0, file, line); in _rm_wunlock_debug()
642 if (!(rm->lock_object.lo_flags & LO_RECURSABLE) && !trylock) { in _rm_rlock_debug()
646 ("rm_rlock: recursed on non-recursive rmlock %p @ %s:%d\n", in _rm_rlock_debug()
659 rm->lock_object.lo_name, file, line)); in _rm_rlock_debug()
660 WITNESS_CHECKORDER(&rm->lock_object, in _rm_rlock_debug()
666 LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 1, file, in _rm_rlock_debug()
669 LOCK_LOG_LOCK("RMRLOCK", &rm->lock_object, 0, 0, file, in _rm_rlock_debug()
671 WITNESS_LOCK(&rm->lock_object, LOP_NOSLEEP, file, line); in _rm_rlock_debug()
675 LOCK_LOG_TRY("RMRLOCK", &rm->lock_object, 0, 0, file, line); in _rm_rlock_debug()
691 WITNESS_UNLOCK(&rm->lock_object, 0, file, line); in _rm_runlock_debug()
692 LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line); in _rm_runlock_debug()
701 * the kernel - we are called from a kernel module.
760 * Handle the write-locked case. Unlike other in _rm_assert()
766 rm->lock_object.lo_name, file, line); in _rm_assert()
769 rm->lock_object.lo_name, file, line); in _rm_assert()
779 rm->lock_object.lo_name, (what & RA_RLOCKED) ? in _rm_assert()
784 rm->lock_object.lo_name, file, line); in _rm_assert()
787 rm->lock_object.lo_name, file, line); in _rm_assert()
792 rm->lock_object.lo_name, file, line); in _rm_assert()
797 rm->lock_object.lo_name, file, line); in _rm_assert()
805 rm->lock_object.lo_name, file, line); in _rm_assert()
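Editor's note: these _rm_assert() cases back the rm_assert() macro, which is compiled in only with INVARIANTS. A small sketch of the consumer side, reusing the hypothetical example_rm and example_counter from the earlier sketch:

/*
 * Hypothetical helper that must be entered with example_rm read-locked;
 * with INVARIANTS, rm_assert() reaches _rm_assert() above and panics if
 * the lock is not held as claimed.
 */
static u_int
example_counter_locked(void)
{

	rm_assert(&example_rm, RA_RLOCKED);
	return (example_counter);
}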
820 td = tr->rmp_thread; in print_tracker()
821 db_printf(" thread %p (tid %d, pid %d, \"%s\") {", td, td->td_tid, in print_tracker()
822 td->td_proc->p_pid, td->td_name); in print_tracker()
823 if (tr->rmp_flags & RMPF_ONQUEUE) { in print_tracker()
825 if (tr->rmp_flags & RMPF_SIGNAL) in print_tracker()
843 ddb_display_cpuset(__DEQUALIFY(const cpuset_t *, &rm->rm_writecpus)); in db_show_rm()
845 db_printf(" per-CPU readers:\n"); in db_show_rm()
847 for (queue = pc->pc_rm_queue.rmq_next; in db_show_rm()
848 queue != &pc->pc_rm_queue; queue = queue->rmq_next) { in db_show_rm()
850 if (tr->rmp_rmlock == rm) in db_show_rm()
854 LIST_FOREACH(tr, &rm->rm_activeReaders, rmp_qentry) in db_show_rm()
856 lc = LOCK_CLASS(&rm->rm_wlock_object); in db_show_rm()
857 db_printf("Backing write-lock (%s):\n", lc->lc_name); in db_show_rm()
858 lc->lc_ddb_show(&rm->rm_wlock_object); in db_show_rm()
863 * Read-mostly sleepable locks.
869 * They are intended to be only used when write-locking is almost never needed
870 * (e.g., they can guard against unloading a kernel module) while read-locking
874 * of concern for your usecase, this is not the right primitive.
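Editor's note: the comment above gives the intended profile for read-mostly sleepable (rms) locks: write-locking is rare and may be expensive, read-locking is a cheap per-CPU operation, and both sides may sleep. A minimal consumer sketch assuming the rms_*() interface implemented below (rms_init(), rms_rlock()/rms_runlock(), rms_wlock()/rms_wunlock(), rms_destroy()); the hook pointer and example_* names are hypothetical.

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/lock.h>
#include <sys/rmlock.h>

/* Hypothetical hook installed by a kernel module and torn down on unload. */
static struct rmslock example_rms;
static int (*example_hook)(void *arg);

static void
example_rms_setup(void)
{

	rms_init(&example_rms, "example_rms");
}

static int
example_call_hook(void *arg)
{
	int error;

	rms_rlock(&example_rms);	/* cheap per-CPU fast path */
	if (example_hook != NULL)
		error = example_hook(arg);	/* the callee may sleep */
	else
		error = ENXIO;
	rms_runlock(&example_rms);
	return (error);
}

static void
example_unload_hook(void)
{

	/* Rare, expensive path: drains per-CPU readers (see rms_wlock()). */
	rms_wlock(&example_rms);
	example_hook = NULL;
	rms_wunlock(&example_rms);
	rms_destroy(&example_rms);
}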
904 return (zpcpu_get(rms->pcpu)); in rms_int_pcpu()
911 return (zpcpu_get_cpu(rms->pcpu, cpu)); in rms_int_remote_pcpu()
919 MPASS(pcpu->influx == 0); in rms_int_influx_enter()
920 pcpu->influx = 1; in rms_int_influx_enter()
928 MPASS(pcpu->influx == 1); in rms_int_influx_exit()
929 pcpu->influx = 0; in rms_int_influx_exit()
937 old = atomic_fetchadd_int(&rms->debug_readers, 1); in rms_int_debug_readers_inc()
946 old = atomic_fetchadd_int(&rms->debug_readers, -1); in rms_int_debug_readers_dec()
967 pcpu->readers++; in rms_int_readers_inc()
976 pcpu->readers--; in rms_int_readers_dec()
986 rms->owner = RMS_NOOWNER; in rms_init()
987 rms->writers = 0; in rms_init()
988 rms->readers = 0; in rms_init()
989 rms->debug_readers = 0; in rms_init()
990 mtx_init(&rms->mtx, name, NULL, MTX_DEF | MTX_NEW); in rms_init()
991 rms->pcpu = uma_zalloc_pcpu(pcpu_zone_8, M_WAITOK | M_ZERO); in rms_init()
998 MPASS(rms->writers == 0); in rms_destroy()
999 MPASS(rms->readers == 0); in rms_destroy()
1000 mtx_destroy(&rms->mtx); in rms_destroy()
1001 uma_zfree_pcpu(pcpu_zone_8, rms->pcpu); in rms_destroy()
1011 mtx_lock(&rms->mtx); in rms_rlock_fallback()
1012 while (rms->writers > 0) in rms_rlock_fallback()
1013 msleep(&rms->readers, &rms->mtx, PRI_MAX_KERN, in rms_rlock_fallback()
1014 mtx_name(&rms->mtx), 0); in rms_rlock_fallback()
1017 mtx_unlock(&rms->mtx); in rms_rlock_fallback()
1028 MPASS(atomic_load_ptr(&rms->owner) != curthread); in rms_rlock()
1034 if (__predict_false(rms->writers > 0)) { in rms_rlock()
1051 MPASS(atomic_load_ptr(&rms->owner) != curthread); in rms_try_rlock()
1057 if (__predict_false(rms->writers > 0)) { in rms_try_rlock()
1078 mtx_lock(&rms->mtx); in rms_runlock_fallback()
1079 MPASS(rms->writers > 0); in rms_runlock_fallback()
1080 MPASS(rms->readers > 0); in rms_runlock_fallback()
1081 MPASS(rms->debug_readers == rms->readers); in rms_runlock_fallback()
1083 rms->readers--; in rms_runlock_fallback()
1084 if (rms->readers == 0) in rms_runlock_fallback()
1085 wakeup_one(&rms->writers); in rms_runlock_fallback()
1086 mtx_unlock(&rms->mtx); in rms_runlock_fallback()
1099 if (__predict_false(rms->writers > 0)) { in rms_runlock()
1124 rms = rmsipi->rms; in rms_action_func()
1127 if (pcpu->influx) in rms_action_func()
1129 if (pcpu->readers != 0) { in rms_action_func()
1130 atomic_add_int(&rms->readers, pcpu->readers); in rms_action_func()
1131 pcpu->readers = 0; in rms_action_func()
1144 rms = rmsipi->rms; in rms_wait_func()
1147 while (atomic_load_int(&pcpu->influx)) in rms_wait_func()
1160 if (pcpu->readers != 0) { in rms_assert_no_pcpu_readers()
1162 pcpu->readers, cpu); in rms_assert_no_pcpu_readers()
1178 MPASS(rms->readers == 0); in rms_wlock_switch()
1179 MPASS(rms->writers == 1); in rms_wlock_switch()
1196 MPASS(atomic_load_ptr(&rms->owner) != curthread); in rms_wlock()
1198 mtx_lock(&rms->mtx); in rms_wlock()
1199 rms->writers++; in rms_wlock()
1200 if (rms->writers > 1) { in rms_wlock()
1201 msleep(&rms->owner, &rms->mtx, PRI_MAX_KERN, in rms_wlock()
1202 mtx_name(&rms->mtx), 0); in rms_wlock()
1203 MPASS(rms->readers == 0); in rms_wlock()
1204 KASSERT(rms->owner == RMS_TRANSIENT, in rms_wlock()
1206 rms->owner)); in rms_wlock()
1210 KASSERT(rms->owner == RMS_NOOWNER, in rms_wlock()
1211 ("%s: unexpected owner value %p\n", __func__, rms->owner)); in rms_wlock()
1216 if (rms->readers > 0) { in rms_wlock()
1217 msleep(&rms->writers, &rms->mtx, PRI_MAX_KERN, in rms_wlock()
1218 mtx_name(&rms->mtx), 0); in rms_wlock()
1222 rms->owner = curthread; in rms_wlock()
1224 mtx_unlock(&rms->mtx); in rms_wlock()
1225 MPASS(rms->readers == 0); in rms_wlock()
1233 mtx_lock(&rms->mtx); in rms_wunlock()
1234 KASSERT(rms->owner == curthread, in rms_wunlock()
1235 ("%s: unexpected owner value %p\n", __func__, rms->owner)); in rms_wunlock()
1236 MPASS(rms->writers >= 1); in rms_wunlock()
1237 MPASS(rms->readers == 0); in rms_wunlock()
1238 rms->writers--; in rms_wunlock()
1239 if (rms->writers > 0) { in rms_wunlock()
1240 wakeup_one(&rms->owner); in rms_wunlock()
1241 rms->owner = RMS_TRANSIENT; in rms_wunlock()
1243 wakeup(&rms->readers); in rms_wunlock()
1244 rms->owner = RMS_NOOWNER; in rms_wunlock()
1246 mtx_unlock(&rms->mtx); in rms_wunlock()