Lines matching refs:rms, grouped by containing function

In rms_int_pcpu():
     900  rms_int_pcpu(struct rmslock *rms)
     904      return (zpcpu_get(rms->pcpu));
In rms_int_remote_pcpu():
     908  rms_int_remote_pcpu(struct rmslock *rms, int cpu)
     911      return (zpcpu_get_cpu(rms->pcpu, cpu));
In rms_int_influx_enter():
     915  rms_int_influx_enter(struct rmslock *rms, struct rmslock_pcpu *pcpu)
In rms_int_influx_exit():
     924  rms_int_influx_exit(struct rmslock *rms, struct rmslock_pcpu *pcpu)
In rms_int_debug_readers_inc():
     934  rms_int_debug_readers_inc(struct rmslock *rms)
     937      old = atomic_fetchadd_int(&rms->debug_readers, 1);
In rms_int_debug_readers_dec():
     942  rms_int_debug_readers_dec(struct rmslock *rms)
     946      old = atomic_fetchadd_int(&rms->debug_readers, -1);
Second definitions of the debug counter helpers:
     951  rms_int_debug_readers_inc(struct rmslock *rms)
     956  rms_int_debug_readers_dec(struct rmslock *rms)
In rms_int_readers_inc():
     962  rms_int_readers_inc(struct rmslock *rms, struct rmslock_pcpu *pcpu)
     966      rms_int_debug_readers_inc(rms);
In rms_int_readers_dec():
     971  rms_int_readers_dec(struct rmslock *rms, struct rmslock_pcpu *pcpu)
     975      rms_int_debug_readers_dec(rms);
In rms_init():
     983  rms_init(struct rmslock *rms, const char *name)
     986      rms->owner = RMS_NOOWNER;
     987      rms->writers = 0;
     988      rms->readers = 0;
     989      rms->debug_readers = 0;
     990      mtx_init(&rms->mtx, name, NULL, MTX_DEF | MTX_NEW);
     991      rms->pcpu = uma_zalloc_pcpu(pcpu_zone_8, M_WAITOK | M_ZERO);
In rms_destroy():
     995  rms_destroy(struct rmslock *rms)
     998      MPASS(rms->writers == 0);
     999      MPASS(rms->readers == 0);
    1000      mtx_destroy(&rms->mtx);
    1001      uma_zfree_pcpu(pcpu_zone_8, rms->pcpu);
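The rms_init()/rms_destroy() pair above frames the whole lifecycle of the lock. The listing appears to come from FreeBSD's rmslock implementation (sys/kern/kern_rmlock.c), whose consumer-facing KPI is declared in sys/rmlock.h; under that assumption, a minimal lifecycle sketch follows. struct example_softc and its fields are hypothetical names used only for illustration, not part of the listing. Note that rms_init() allocates the per-CPU area with M_WAITOK, so it must be called from a context that may sleep.

/* Hedged sketch: assumed kernel context, hypothetical consumer. */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rmlock.h>

struct example_softc {
        struct rmslock  data_rms;       /* protects cached_value */
        int             cached_value;   /* hypothetical read-mostly field */
};

static void
example_attach(struct example_softc *sc)
{
        /* Zeroes the counters and allocates the per-CPU area (may sleep). */
        rms_init(&sc->data_rms, "example data rms");
}

static void
example_detach(struct example_softc *sc)
{
        /* All readers and writers must be gone by now (see the MPASSes above). */
        rms_destroy(&sc->data_rms);
}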
In rms_rlock_fallback():
    1005  rms_rlock_fallback(struct rmslock *rms)
    1008      rms_int_influx_exit(rms, rms_int_pcpu(rms));
    1011      mtx_lock(&rms->mtx);
    1012      while (rms->writers > 0)
    1013          msleep(&rms->readers, &rms->mtx, PRI_MAX_KERN,
    1014              mtx_name(&rms->mtx), 0);
    1016      rms_int_readers_inc(rms, rms_int_pcpu(rms));
    1017      mtx_unlock(&rms->mtx);
In rms_rlock():
    1023  rms_rlock(struct rmslock *rms)
    1027      rms_assert_rlock_ok(rms);
    1028      MPASS(atomic_load_ptr(&rms->owner) != curthread);
    1031      pcpu = rms_int_pcpu(rms);
    1032      rms_int_influx_enter(rms, pcpu);
    1034      if (__predict_false(rms->writers > 0)) {
    1035          rms_rlock_fallback(rms);
    1039      rms_int_readers_inc(rms, pcpu);
    1041      rms_int_influx_exit(rms, pcpu);
In rms_try_rlock():
    1047  rms_try_rlock(struct rmslock *rms)
    1051      MPASS(atomic_load_ptr(&rms->owner) != curthread);
    1054      pcpu = rms_int_pcpu(rms);
    1055      rms_int_influx_enter(rms, pcpu);
    1057      if (__predict_false(rms->writers > 0)) {
    1058          rms_int_influx_exit(rms, pcpu);
    1063      rms_int_readers_inc(rms, pcpu);
    1065      rms_int_influx_exit(rms, pcpu);
In rms_runlock_fallback():
    1072  rms_runlock_fallback(struct rmslock *rms)
    1075      rms_int_influx_exit(rms, rms_int_pcpu(rms));
    1078      mtx_lock(&rms->mtx);
    1079      MPASS(rms->writers > 0);
    1080      MPASS(rms->readers > 0);
    1081      MPASS(rms->debug_readers == rms->readers);
    1082      rms_int_debug_readers_dec(rms);
    1083      rms->readers--;
    1084      if (rms->readers == 0)
    1085          wakeup_one(&rms->writers);
    1086      mtx_unlock(&rms->mtx);
In rms_runlock():
    1091  rms_runlock(struct rmslock *rms)
    1096      pcpu = rms_int_pcpu(rms);
    1097      rms_int_influx_enter(rms, pcpu);
    1099      if (__predict_false(rms->writers > 0)) {
    1100          rms_runlock_fallback(rms);
    1104      rms_int_readers_dec(rms, pcpu);
    1106      rms_int_influx_exit(rms, pcpu);
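rms_rlock(), rms_try_rlock(), and rms_runlock() above are the read-side entry points: the common case only touches the per-CPU influx/readers counters, and the mutex-protected fallback paths are taken while rms->writers is non-zero. A hedged usage sketch follows, reusing the hypothetical example_softc from the lifecycle sketch; the error handling and field names are illustrative, not part of the listing.

/* Hedged read-side sketch; example_read_data() is a hypothetical consumer. */
static int
example_read_data(struct example_softc *sc, bool canwait, int *valp)
{
        if (canwait) {
                /* May sleep on the internal mutex while a writer is active. */
                rms_rlock(&sc->data_rms);
        } else if (!rms_try_rlock(&sc->data_rms)) {
                /* rms_try_rlock() returns non-zero only on success. */
                return (EWOULDBLOCK);
        }
        *valp = sc->cached_value;
        rms_runlock(&sc->data_rms);
        return (0);
}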
Struct member (the rmsipi argument structure referenced below):
    1112      struct rmslock *rms;
In rms_action_func():
    1121      struct rmslock *rms;
    1124      rms = rmsipi->rms;
    1125      pcpu = rms_int_pcpu(rms);
    1130      atomic_add_int(&rms->readers, pcpu->readers);
In rms_wait_func():
    1141      struct rmslock *rms;
    1144      rms = rmsipi->rms;
    1145      pcpu = rms_int_remote_pcpu(rms, cpu);
In rms_assert_no_pcpu_readers():
    1153  rms_assert_no_pcpu_readers(struct rmslock *rms)
    1159          pcpu = rms_int_remote_pcpu(rms, cpu);
Second definition of rms_assert_no_pcpu_readers():
    1168  rms_assert_no_pcpu_readers(struct rmslock *rms)
In rms_wlock_switch():
    1174  rms_wlock_switch(struct rmslock *rms)
    1178      MPASS(rms->readers == 0);
    1179      MPASS(rms->writers == 1);
    1181      rmsipi.rms = rms;
In rms_wlock():
    1192  rms_wlock(struct rmslock *rms)
    1196      MPASS(atomic_load_ptr(&rms->owner) != curthread);
    1198      mtx_lock(&rms->mtx);
    1199      rms->writers++;
    1200      if (rms->writers > 1) {
    1201          msleep(&rms->owner, &rms->mtx, PRI_MAX_KERN,
    1202              mtx_name(&rms->mtx), 0);
    1203          MPASS(rms->readers == 0);
    1204          KASSERT(rms->owner == RMS_TRANSIENT,
    1206              rms->owner));
    1210      KASSERT(rms->owner == RMS_NOOWNER,
    1211          ("%s: unexpected owner value %p\n", __func__, rms->owner));
    1213      rms_wlock_switch(rms);
    1214      rms_assert_no_pcpu_readers(rms);
    1216      if (rms->readers > 0) {
    1217          msleep(&rms->writers, &rms->mtx, PRI_MAX_KERN,
    1218              mtx_name(&rms->mtx), 0);
    1222      rms->owner = curthread;
    1223      rms_assert_no_pcpu_readers(rms);
    1224      mtx_unlock(&rms->mtx);
    1225      MPASS(rms->readers == 0);
In rms_wunlock():
    1230  rms_wunlock(struct rmslock *rms)
    1233      mtx_lock(&rms->mtx);
    1234      KASSERT(rms->owner == curthread,
    1235          ("%s: unexpected owner value %p\n", __func__, rms->owner));
    1236      MPASS(rms->writers >= 1);
    1237      MPASS(rms->readers == 0);
    1238      rms->writers--;
    1239      if (rms->writers > 0) {
    1240          wakeup_one(&rms->owner);
    1241          rms->owner = RMS_TRANSIENT;
    1243          wakeup(&rms->readers);
    1244          rms->owner = RMS_NOOWNER;
    1246      mtx_unlock(&rms->mtx);
In rms_unlock():
    1251  rms_unlock(struct rmslock *rms)
    1254      if (rms_wowned(rms))
    1255          rms_wunlock(rms);
    1257          rms_runlock(rms);
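On the write side, rms_wlock() serializes writers on the internal mutex, calls rms_wlock_switch() to fold the per-CPU reader counts into the shared rms->readers count, and sleeps until that count drains to zero; rms_wunlock() either hands the lock to the next queued writer (RMS_TRANSIENT) or wakes all blocked readers. rms_unlock() releases whichever mode the current thread holds, as decided by rms_wowned(). A final hedged sketch of a writer, again with hypothetical names:

/* Hedged write-side sketch; example_update_data() is a hypothetical consumer. */
static void
example_update_data(struct example_softc *sc, int newval)
{
        /* Sleeps while other writers or active readers hold the lock. */
        rms_wlock(&sc->data_rms);
        sc->cached_value = newval;
        rms_wunlock(&sc->data_rms);
}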