/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005 John Baldwin <jhb@FreeBSD.org>
 * Copyright (c) 2020 The FreeBSD Foundation
 *
 * Portions of this software were developed by Mark Johnston under
 * sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef __SYS_BLOCKCOUNT_H__
#define __SYS_BLOCKCOUNT_H__

#ifdef _KERNEL

#include <sys/systm.h>
#include <sys/_blockcount.h>

struct lock_object;

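/*
 * Out-of-line sleep and wakeup routines backing the inline operations
 * defined below.
 */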
int _blockcount_sleep(blockcount_t *bc, struct lock_object *, const char *wmesg,
    int prio);
void _blockcount_wakeup(blockcount_t *bc, u_int old);

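/*
 * Initialize a blockcount: the count starts at zero with no waiters.
 */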
static __inline void
blockcount_init(blockcount_t *bc)
{
	atomic_store_int(&bc->__count, 0);
}

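/*
 * Acquire "n" references, checking for counter overflow when INVARIANTS
 * is enabled.
 */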
static __inline void
blockcount_acquire(blockcount_t *bc, u_int n)
{
#ifdef INVARIANTS
	u_int old;

	old = atomic_fetchadd_int(&bc->__count, n);
	KASSERT(old + n > old, ("%s: counter overflow %p", __func__, bc));
#else
	atomic_add_int(&bc->__count, n);
#endif
}

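/*
 * Release "n" references.  The fence gives the decrement release semantics;
 * if this drops the count to zero and waiters are present, wake them up.
 */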
static __inline void
blockcount_release(blockcount_t *bc, u_int n)
{
	u_int old;

	atomic_thread_fence_rel();
	old = atomic_fetchadd_int(&bc->__count, -n);
	KASSERT(old >= n, ("%s: counter underflow %p", __func__, bc));
	if (_BLOCKCOUNT_COUNT(old) == n && _BLOCKCOUNT_WAITERS(old))
		_blockcount_wakeup(bc, old);
}

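/*
 * Sleep until the count reaches zero, looping because _blockcount_sleep()
 * returns EAGAIN when the wait must be retried.
 */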
static __inline void
_blockcount_wait(blockcount_t *bc, struct lock_object *lo, const char *wmesg,
    int prio)
{
	KASSERT((prio & ~PRIMASK) == 0, ("%s: invalid prio %x", __func__, prio));

	while (_blockcount_sleep(bc, lo, wmesg, prio) == EAGAIN)
		;
}

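/*
 * Convenience wrappers: cast the caller's lock to its embedded
 * struct lock_object so that the lock may be passed directly.
 */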
#define	blockcount_sleep(bc, lo, wmesg, prio)	\
	_blockcount_sleep((bc), (struct lock_object *)(lo), (wmesg), (prio))
#define	blockcount_wait(bc, lo, wmesg, prio)	\
	_blockcount_wait((bc), (struct lock_object *)(lo), (wmesg), (prio))

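/*
 * A minimal usage sketch (illustrative only; the structure, field, and
 * wait-message names below are hypothetical).  Each in-flight operation
 * acquires a reference and releases it on completion; a draining thread
 * sleeps until the count returns to zero:
 *
 *	struct obj {
 *		blockcount_t	o_inflight;
 *	};
 *
 *	blockcount_init(&obj->o_inflight);
 *
 *	blockcount_acquire(&obj->o_inflight, 1);
 *	... perform the operation ...
 *	blockcount_release(&obj->o_inflight, 1);
 *
 *	blockcount_wait(&obj->o_inflight, NULL, "objdrn", PWAIT);
 */
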
#endif /* _KERNEL */
#endif /* !__SYS_BLOCKCOUNT_H__ */