xref: /freebsd/sys/kern/kern_rangelock.c (revision c3d8a93126b9dd05fcfed3685bc3817f3c1eccc9)
18f0e9130SKonstantin Belousov /*-
24d846d26SWarner Losh  * SPDX-License-Identifier: BSD-2-Clause
38a36da99SPedro F. Giffuni  *
48f0e9130SKonstantin Belousov  * Copyright (c) 2009 Konstantin Belousov <kib@FreeBSD.org>
58f0e9130SKonstantin Belousov  * All rights reserved.
68f0e9130SKonstantin Belousov  *
78f0e9130SKonstantin Belousov  * Redistribution and use in source and binary forms, with or without
88f0e9130SKonstantin Belousov  * modification, are permitted provided that the following conditions
98f0e9130SKonstantin Belousov  * are met:
108f0e9130SKonstantin Belousov  * 1. Redistributions of source code must retain the above copyright
118f0e9130SKonstantin Belousov  *    notice unmodified, this list of conditions, and the following
128f0e9130SKonstantin Belousov  *    disclaimer.
138f0e9130SKonstantin Belousov  * 2. Redistributions in binary form must reproduce the above copyright
148f0e9130SKonstantin Belousov  *    notice, this list of conditions and the following disclaimer in the
158f0e9130SKonstantin Belousov  *    documentation and/or other materials provided with the distribution.
168f0e9130SKonstantin Belousov  *
178f0e9130SKonstantin Belousov  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
188f0e9130SKonstantin Belousov  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
198f0e9130SKonstantin Belousov  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
208f0e9130SKonstantin Belousov  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
218f0e9130SKonstantin Belousov  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
228f0e9130SKonstantin Belousov  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
238f0e9130SKonstantin Belousov  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
248f0e9130SKonstantin Belousov  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
258f0e9130SKonstantin Belousov  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
268f0e9130SKonstantin Belousov  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
278f0e9130SKonstantin Belousov  */
288f0e9130SKonstantin Belousov 
298f0e9130SKonstantin Belousov #include <sys/param.h>
30*c3d8a931SKonstantin Belousov #include <sys/kassert.h>
318f0e9130SKonstantin Belousov #include <sys/kernel.h>
328f0e9130SKonstantin Belousov #include <sys/lock.h>
338f0e9130SKonstantin Belousov #include <sys/mutex.h>
348f0e9130SKonstantin Belousov #include <sys/proc.h>
358f0e9130SKonstantin Belousov #include <sys/rangelock.h>
36*c3d8a931SKonstantin Belousov #include <sys/sleepqueue.h>
37*c3d8a931SKonstantin Belousov #include <sys/smr.h>
388f0e9130SKonstantin Belousov 
398f0e9130SKonstantin Belousov #include <vm/uma.h>
408f0e9130SKonstantin Belousov 
41*c3d8a931SKonstantin Belousov /*
42*c3d8a931SKonstantin Belousov  * Implementation of range locks based on the paper
43*c3d8a931SKonstantin Belousov  * https://doi.org/10.1145/3342195.3387533
44*c3d8a931SKonstantin Belousov  * arXiv:2006.12144v1 [cs.OS] 22 Jun 2020
45*c3d8a931SKonstantin Belousov  * Scalable Range Locks for Scalable Address Spaces and Beyond
46*c3d8a931SKonstantin Belousov  * by Alex Kogan, Dave Dice, and Shady Issa
47*c3d8a931SKonstantin Belousov  */
48*c3d8a931SKonstantin Belousov 
49*c3d8a931SKonstantin Belousov static struct rl_q_entry *rl_e_unmark(const struct rl_q_entry *e);
50*c3d8a931SKonstantin Belousov 
51*c3d8a931SKonstantin Belousov /*
52*c3d8a931SKonstantin Belousov  * rl_q_next links all granted ranges in the lock.  We cannot free an
53*c3d8a931SKonstantin Belousov  * rl_q_entry while in the smr section, and cannot reuse rl_q_next
54*c3d8a931SKonstantin Belousov  * linkage since other threads might follow it even after CAS removed
55*c3d8a931SKonstantin Belousov  * the range.  Use rl_q_free for local list of ranges to remove after
56*c3d8a931SKonstantin Belousov  * the smr section is dropped.
57*c3d8a931SKonstantin Belousov  */
/*
 * A single granted (or pending-insert) range in the lock's sorted,
 * singly-linked list.  The low bit of rl_q_next doubles as the
 * "removed" mark, so rl_q_next must stay the first member (see
 * rangelock_unlock(), which sets bit 0 via atomic ops on its address).
 */
struct rl_q_entry {
	struct rl_q_entry *rl_q_next;	/* next granted range; low bit = removed mark */
	struct rl_q_entry *rl_q_free;	/* thread-local deferred-free linkage */
	off_t		rl_q_start, rl_q_end;	/* locked range [start, end) */
	int		rl_q_flags;	/* RL_LOCK_READ or RL_LOCK_WRITE */
#ifdef INVARIANTS
	struct thread	*rl_q_owner;	/* locking thread, for assertions only */
#endif
};
678f0e9130SKonstantin Belousov 
688f0e9130SKonstantin Belousov static uma_zone_t rl_entry_zone;
69*c3d8a931SKonstantin Belousov static smr_t rl_smr;
708f0e9130SKonstantin Belousov 
/*
 * Create the SMR-enabled UMA zone for rl_q_entry allocations and
 * capture its SMR domain; entries freed with uma_zfree_smr() are only
 * reused after all concurrent smr_enter() sections have exited.
 * Runs once at boot via SYSINIT.
 */
static void
rangelock_sys_init(void)
{
	rl_entry_zone = uma_zcreate("rl_entry", sizeof(struct rl_q_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGNOF(struct rl_q_entry),
	    UMA_ZONE_SMR);
	rl_smr = uma_zone_get_smr(rl_entry_zone);
}
SYSINIT(rl, SI_SUB_LOCK, SI_ORDER_ANY, rangelock_sys_init, NULL);
808f0e9130SKonstantin Belousov 
818f0e9130SKonstantin Belousov static struct rl_q_entry *
82*c3d8a931SKonstantin Belousov rlqentry_alloc(vm_ooffset_t start, vm_ooffset_t end, int flags)
838f0e9130SKonstantin Belousov {
84*c3d8a931SKonstantin Belousov 	struct rl_q_entry *e;
858f0e9130SKonstantin Belousov 
86*c3d8a931SKonstantin Belousov 	e = uma_zalloc_smr(rl_entry_zone, M_WAITOK);
87*c3d8a931SKonstantin Belousov 	e->rl_q_next = NULL;
88*c3d8a931SKonstantin Belousov 	e->rl_q_free = NULL;
89*c3d8a931SKonstantin Belousov 	e->rl_q_start = start;
90*c3d8a931SKonstantin Belousov 	e->rl_q_end = end;
91*c3d8a931SKonstantin Belousov 	e->rl_q_flags = flags;
92*c3d8a931SKonstantin Belousov #ifdef INVARIANTS
93*c3d8a931SKonstantin Belousov 	e->rl_q_owner = curthread;
94*c3d8a931SKonstantin Belousov #endif
95*c3d8a931SKonstantin Belousov 	return (e);
968f0e9130SKonstantin Belousov }
978f0e9130SKonstantin Belousov 
988f0e9130SKonstantin Belousov void
998f0e9130SKonstantin Belousov rangelock_init(struct rangelock *lock)
1008f0e9130SKonstantin Belousov {
101*c3d8a931SKonstantin Belousov 	lock->sleepers = false;
102*c3d8a931SKonstantin Belousov 	atomic_store_ptr(&lock->head, NULL);
1038f0e9130SKonstantin Belousov }
1048f0e9130SKonstantin Belousov 
/*
 * Free all entries still linked into the lock.  The caller must
 * guarantee exclusive access: no concurrent lock/unlock operations and
 * no sleepers.  Any entry still on the list has been unlocked, so its
 * rl_q_next pointer carries the removed mark, which rl_e_unmark()
 * strips (and asserts) while walking.  Plain (non-acquire) loads
 * suffice here because there is no concurrency by contract.
 */
void
rangelock_destroy(struct rangelock *lock)
{
	struct rl_q_entry *e, *ep;

	MPASS(!lock->sleepers);
	for (e = (struct rl_q_entry *)atomic_load_ptr(&lock->head);
	    e != NULL; e = rl_e_unmark(ep)) {
		/* Read the next link before freeing the entry it lives in. */
		ep = atomic_load_ptr(&e->rl_q_next);
		uma_zfree_smr(rl_entry_zone, e);
	}
}
1178f0e9130SKonstantin Belousov 
118*c3d8a931SKonstantin Belousov static bool
119*c3d8a931SKonstantin Belousov rl_e_is_marked(const struct rl_q_entry *e)
1208f0e9130SKonstantin Belousov {
121*c3d8a931SKonstantin Belousov 	return (((uintptr_t)e & 1) != 0);
1228f0e9130SKonstantin Belousov }
1238f0e9130SKonstantin Belousov 
124*c3d8a931SKonstantin Belousov static struct rl_q_entry *
125*c3d8a931SKonstantin Belousov rl_e_unmark(const struct rl_q_entry *e)
1268f0e9130SKonstantin Belousov {
127*c3d8a931SKonstantin Belousov 	MPASS(rl_e_is_marked(e));
128*c3d8a931SKonstantin Belousov 	return ((struct rl_q_entry *)((uintptr_t)e & ~1));
1292bb93f2dSColin Percival }
1302bb93f2dSColin Percival 
131*c3d8a931SKonstantin Belousov static struct rl_q_entry *
132*c3d8a931SKonstantin Belousov rl_q_load(struct rl_q_entry **p)
1338f0e9130SKonstantin Belousov {
134*c3d8a931SKonstantin Belousov 	return ((struct rl_q_entry *)atomic_load_acq_ptr((uintptr_t *)p));
1358f0e9130SKonstantin Belousov }
1368f0e9130SKonstantin Belousov 
/*
 * Release a granted range.  The entry is not unlinked or freed here;
 * instead its rl_q_next low bit is set under the sleepqueue chain
 * lock, logically deleting it.  Later rl_insert() passes unlink and
 * free marked entries lazily.  Any threads sleeping on a conflict are
 * woken so they can re-scan the list.
 */
void
rangelock_unlock(struct rangelock *lock, void *cookie)
{
	struct rl_q_entry *e;

	e = cookie;
	MPASS(lock != NULL && e != NULL);
	MPASS(!rl_e_is_marked(rl_q_load(&e->rl_q_next)));
	MPASS(e->rl_q_owner == curthread);

	/* The chain lock orders the mark against sleepers' re-checks. */
	sleepq_lock(&lock->sleepers);
#ifdef INVARIANTS
	/* testandset also verifies the mark was not already present. */
	int r = atomic_testandset_long((uintptr_t *)&e->rl_q_next, 0);
	MPASS(r == 0);
#else
	atomic_set_ptr((uintptr_t *)&e->rl_q_next, 1);
#endif
	lock->sleepers = false;
	sleepq_broadcast(&lock->sleepers, SLEEPQ_SLEEP, 0, 0);
	sleepq_release(&lock->sleepers);
}
1588f0e9130SKonstantin Belousov 
1598f0e9130SKonstantin Belousov /*
160*c3d8a931SKonstantin Belousov  * result: -1 if e1 before e2
161*c3d8a931SKonstantin Belousov  *          1 if e1 after e2
162*c3d8a931SKonstantin Belousov  *          0 if e1 and e2 overlap
1638f0e9130SKonstantin Belousov  */
164*c3d8a931SKonstantin Belousov static int
165*c3d8a931SKonstantin Belousov rl_e_compare(const struct rl_q_entry *e1, const struct rl_q_entry *e2)
1668f0e9130SKonstantin Belousov {
167*c3d8a931SKonstantin Belousov 	if (e1 == NULL)
168*c3d8a931SKonstantin Belousov 		return (1);
169*c3d8a931SKonstantin Belousov 	if (e1->rl_q_start >= e2->rl_q_end)
170*c3d8a931SKonstantin Belousov 		return (1);
171*c3d8a931SKonstantin Belousov 	if (e2->rl_q_start >= e1->rl_q_end)
172*c3d8a931SKonstantin Belousov 		return (-1);
173*c3d8a931SKonstantin Belousov 	return (0);
1748f0e9130SKonstantin Belousov }
1758f0e9130SKonstantin Belousov 
/*
 * Block until an unlock wakes us.  Called with the sleepqueue chain
 * lock held (taken by the caller via sleepq_lock()); sleepq_add/
 * sleepq_wait consume it.  The SMR section must be exited before
 * sleeping -- holding it across a sleep would stall reclamation --
 * and is re-entered afterwards, so the caller must re-scan the list
 * from scratch.
 */
static void
rl_insert_sleep(struct rangelock *lock)
{
	smr_exit(rl_smr);
	DROP_GIANT();
	/* Set before sleeping; cleared by rangelock_unlock() broadcast. */
	lock->sleepers = true;
	sleepq_add(&lock->sleepers, NULL, "rangelk", 0, 0);
	sleepq_wait(&lock->sleepers, PRI_USER);
	PICKUP_GIANT();
	smr_enter(rl_smr);
}
1878f0e9130SKonstantin Belousov 
188*c3d8a931SKonstantin Belousov static bool
189*c3d8a931SKonstantin Belousov rl_q_cas(struct rl_q_entry **prev, struct rl_q_entry *old,
190*c3d8a931SKonstantin Belousov     struct rl_q_entry *new)
191*c3d8a931SKonstantin Belousov {
192*c3d8a931SKonstantin Belousov 	return (atomic_cmpset_rel_ptr((uintptr_t *)prev, (uintptr_t)old,
193*c3d8a931SKonstantin Belousov 	    (uintptr_t)new) != 0);
194*c3d8a931SKonstantin Belousov }
1958f0e9130SKonstantin Belousov 
/*
 * Attempt to insert entry e into the sorted list of granted ranges.
 * Must be called inside an SMR section.  Returns true when e was
 * linked in (the lock is granted), false only when trylock is set and
 * a conflicting range was found.  Marked (unlocked) entries
 * encountered during the scan are unlinked and pushed onto *free for
 * the caller to release after leaving the SMR section.
 */
static bool
rl_insert(struct rangelock *lock, struct rl_q_entry *e, bool trylock,
    struct rl_q_entry **free)
{
	struct rl_q_entry *cur, *next, **prev;
	int r;

again:
	prev = &lock->head;
	/* Fast path: empty list, try to become the sole entry. */
	if (rl_q_load(prev) == NULL && rl_q_cas(prev, NULL, e))
		return (true);

	for (cur = rl_q_load(prev);;) {
		/*
		 * *prev itself is marked: the entry holding prev was
		 * unlocked under us; restart from the head.
		 */
		if (rl_e_is_marked(cur))
			goto again;

		if (cur != NULL) {
			next = rl_q_load(&cur->rl_q_next);
			if (rl_e_is_marked(next)) {
				/*
				 * cur was unlocked; try to unlink it.
				 * Only the winner of the CAS may queue
				 * cur for freeing; either way, skip
				 * over it and keep scanning.
				 */
				next = rl_e_unmark(next);
				if (rl_q_cas(prev, cur, next)) {
#ifdef INVARIANTS
					cur->rl_q_owner = NULL;
#endif
					cur->rl_q_free = *free;
					*free = cur;
				}
				cur = next;
				continue;
			}
		}

		r = rl_e_compare(cur, e);
		if (r == -1) {
			/* cur is entirely before e; advance. */
			prev = &cur->rl_q_next;
			cur = rl_q_load(prev);
		} else if (r == 0) {
			/*
			 * Conflict.  Take the sleepqueue lock, then
			 * re-check that cur was not unlocked in the
			 * meantime -- if it was, there is nothing to
			 * wait for.
			 */
			sleepq_lock(&lock->sleepers);
			if (__predict_false(rl_e_is_marked(rl_q_load(
			    &cur->rl_q_next)))) {
				sleepq_release(&lock->sleepers);
				continue;
			}
			if (trylock) {
				sleepq_release(&lock->sleepers);
				return (false);
			}
			rl_insert_sleep(lock);
			/* e is still valid */
			goto again;
		} else /* r == 1 */ {
			/* e sorts before cur; try to link e in here. */
			e->rl_q_next = cur;
			if (rl_q_cas(prev, cur, e)) {
				atomic_thread_fence_acq();
				return (true);
			}
			/* Reset rl_q_next in case we hit fast path. */
			e->rl_q_next = NULL;
			cur = rl_q_load(prev);
		}
	}
}
258*c3d8a931SKonstantin Belousov 
/*
 * Common lock entry point: insert e under SMR protection, then free
 * all entries collected on the local deferred-free list once the SMR
 * section is exited (they cannot be freed inside it).  Returns the
 * cookie (e) on success, or NULL when trylock failed; on failure e
 * itself is recycled through the same free list.
 */
static struct rl_q_entry *
rangelock_lock_int(struct rangelock *lock, struct rl_q_entry *e,
    bool trylock)
{
	struct rl_q_entry *free, *x, *xp;
	bool res;

	free = NULL;
	smr_enter(rl_smr);
	res = rl_insert(lock, e, trylock, &free);
	smr_exit(rl_smr);
	/* A blocking request must always succeed. */
	MPASS(trylock || res);
	if (!res) {
		e->rl_q_free = free;
		free = e;
		e = NULL;
	}
	for (x = free; x != NULL; x = xp) {
		MPASS(!rl_e_is_marked(x));
		xp = x->rl_q_free;
		MPASS(!rl_e_is_marked(xp));
		uma_zfree_smr(rl_entry_zone, x);
	}
	return (e);
}
2848f0e9130SKonstantin Belousov 
2858f0e9130SKonstantin Belousov void *
286*c3d8a931SKonstantin Belousov rangelock_rlock(struct rangelock *lock, vm_ooffset_t start, vm_ooffset_t end)
2878f0e9130SKonstantin Belousov {
288*c3d8a931SKonstantin Belousov 	struct rl_q_entry *e;
2898f0e9130SKonstantin Belousov 
290*c3d8a931SKonstantin Belousov 	e = rlqentry_alloc(start, end, RL_LOCK_READ);
291*c3d8a931SKonstantin Belousov 	return (rangelock_lock_int(lock, e, false));
292e3680954SRick Macklem }
293e3680954SRick Macklem 
294e3680954SRick Macklem void *
295*c3d8a931SKonstantin Belousov rangelock_tryrlock(struct rangelock *lock, vm_ooffset_t start, vm_ooffset_t end)
296e3680954SRick Macklem {
297*c3d8a931SKonstantin Belousov 	struct rl_q_entry *e;
298e3680954SRick Macklem 
299*c3d8a931SKonstantin Belousov 	e = rlqentry_alloc(start, end, RL_LOCK_READ);
300*c3d8a931SKonstantin Belousov 	return (rangelock_lock_int(lock, e, true));
3018f0e9130SKonstantin Belousov }
3028f0e9130SKonstantin Belousov 
3038f0e9130SKonstantin Belousov void *
304*c3d8a931SKonstantin Belousov rangelock_wlock(struct rangelock *lock, vm_ooffset_t start, vm_ooffset_t end)
3058f0e9130SKonstantin Belousov {
306*c3d8a931SKonstantin Belousov 	struct rl_q_entry *e;
3078f0e9130SKonstantin Belousov 
308*c3d8a931SKonstantin Belousov 	e = rlqentry_alloc(start, end, RL_LOCK_WRITE);
309*c3d8a931SKonstantin Belousov 	return (rangelock_lock_int(lock, e, true));
310e3680954SRick Macklem }
311e3680954SRick Macklem 
312e3680954SRick Macklem void *
313*c3d8a931SKonstantin Belousov rangelock_trywlock(struct rangelock *lock, vm_ooffset_t start, vm_ooffset_t end)
314e3680954SRick Macklem {
315*c3d8a931SKonstantin Belousov 	struct rl_q_entry *e;
316e3680954SRick Macklem 
317*c3d8a931SKonstantin Belousov 	e = rlqentry_alloc(start, end, RL_LOCK_WRITE);
318*c3d8a931SKonstantin Belousov 	return (rangelock_lock_int(lock, e, true));
3198f0e9130SKonstantin Belousov }
3203155f2f0SKyle Evans 
3213155f2f0SKyle Evans #ifdef INVARIANT_SUPPORT
/*
 * Assertion hook for rangelock cookies (RCA_* checks).  Intentionally
 * a no-op in this implementation: the cookie is an rl_q_entry whose
 * state is already asserted inline (see rangelock_unlock()), and no
 * per-cookie checks are implemented here yet.
 */
void
_rangelock_cookie_assert(void *cookie, int what, const char *file, int line)
{
}
3263155f2f0SKyle Evans #endif	/* INVARIANT_SUPPORT */
327*c3d8a931SKonstantin Belousov 
328*c3d8a931SKonstantin Belousov #include "opt_ddb.h"
329*c3d8a931SKonstantin Belousov #ifdef DDB
330*c3d8a931SKonstantin Belousov #include <ddb/ddb.h>
331*c3d8a931SKonstantin Belousov 
/*
 * DDB command: "show rangelock <addr>".  Dumps each entry on the
 * list, including logically-deleted (marked) ones; pointers read here
 * may be tagged, so each is unmarked before dereference.  Runs in the
 * debugger with the system stopped, hence plain loads are fine.
 */
DB_SHOW_COMMAND(rangelock, db_show_rangelock)
{
	struct rangelock *lock;
	struct rl_q_entry *e, *x;

	if (!have_addr) {
		db_printf("show rangelock addr\n");
		return;
	}

	lock = (struct rangelock *)addr;
	db_printf("rangelock %p sleepers %d\n", lock, lock->sleepers);
	for (e = lock->head;;) {
		/* e may carry the removal tag; x is the real address. */
		x = rl_e_is_marked(e) ? rl_e_unmark(e) : e;
		if (x == NULL)
			break;
		db_printf("  entry %p marked %d %d start %#jx end %#jx "
		    "flags %x next %p",
		    e, rl_e_is_marked(e), rl_e_is_marked(x->rl_q_next),
		    x->rl_q_start, x->rl_q_end, x->rl_q_flags, x->rl_q_next);
#ifdef INVARIANTS
		db_printf(" owner %p (%d)", x->rl_q_owner,
		    x->rl_q_owner != NULL ? x->rl_q_owner->td_tid : -1);
#endif
		db_printf("\n");
		e = x->rl_q_next;
	}
}
360*c3d8a931SKonstantin Belousov 
361*c3d8a931SKonstantin Belousov #endif	/* DDB */
362