/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2005 John Baldwin <jhb@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef __SYS_REFCOUNT_H__
#define __SYS_REFCOUNT_H__

#include <sys/types.h>
#include <sys/kassert.h>
#if !defined(_KERNEL) && !defined(_STANDALONE)
#include <stdbool.h>
#endif

#include <machine/atomic.h>

#define	REFCOUNT_SATURATED(val)		(((val) & (1U << 31)) != 0)
#define	REFCOUNT_SATURATION_VALUE	(3U << 30)

/*
 * Attempt to handle reference count overflow and underflow.  Force the counter
 * to stay at the saturation value so that a counter overflow cannot trigger
 * destruction of the containing object and instead leads to a less harmful
 * memory leak.
 */
static __inline void
_refcount_update_saturated(volatile u_int *count)
{
#ifdef INVARIANTS
	panic("refcount %p wraparound", count);
#else
	atomic_store_int(count, REFCOUNT_SATURATION_VALUE);
#endif
}
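
/*
 * Illustrative note, added for clarity (not part of the original header):
 * REFCOUNT_SATURATION_VALUE is 3U << 30 (0xc0000000), the middle of the
 * range in which bit 31 is set, so REFCOUNT_SATURATED() keeps reporting the
 * counter as saturated even after it drifts by up to 2^30 in either
 * direction.  That headroom is why the *n variants below assert
 * n < REFCOUNT_SATURATION_VALUE / 2.  A minimal sketch of the arithmetic,
 * assuming a hypothetical counter c:
 *
 *	c = REFCOUNT_SATURATION_VALUE;	c == 0xc0000000, saturated
 *	c += 1000;			c == 0xc00003e8, still saturated
 *	c -= 2000;			c == 0xbffffc18, still saturated
 */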

static __inline void
refcount_init(volatile u_int *count, u_int value)
{
	KASSERT(!REFCOUNT_SATURATED(value),
	    ("invalid initial refcount value %u", value));
	atomic_store_int(count, value);
}

static __inline u_int
refcount_load(volatile const u_int *count)
{
	return (atomic_load_int(count));
}

static __inline u_int
refcount_acquire(volatile u_int *count)
{
	u_int old;

	old = atomic_fetchadd_int(count, 1);
	if (__predict_false(REFCOUNT_SATURATED(old)))
		_refcount_update_saturated(count);

	return (old);
}

static __inline u_int
refcount_acquiren(volatile u_int *count, u_int n)
{
	u_int old;

	KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
	    ("refcount_acquiren: n=%u too large", n));
	old = atomic_fetchadd_int(count, n);
	if (__predict_false(REFCOUNT_SATURATED(old)))
		_refcount_update_saturated(count);

	return (old);
}

static __inline __result_use_check bool
refcount_acquire_checked(volatile u_int *count)
{
	u_int old;

	old = atomic_load_int(count);
	for (;;) {
		if (__predict_false(REFCOUNT_SATURATED(old + 1)))
			return (false);
		if (__predict_true(atomic_fcmpset_int(count, &old,
		    old + 1) == 1))
			return (true);
	}
}

/*
 * This function returns true if the refcount was
 * incremented, and false otherwise.
 */
static __inline __result_use_check bool
refcount_acquire_if_gt(volatile u_int *count, u_int n)
{
	u_int old;

	old = atomic_load_int(count);
	for (;;) {
		if (old <= n)
			return (false);
		if (__predict_false(REFCOUNT_SATURATED(old)))
			return (true);
		if (atomic_fcmpset_int(count, &old, old + 1))
			return (true);
	}
}
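
/*
 * Illustrative usage sketch, added for clarity (not part of the original
 * header): refcount_acquire_if_gt() only takes a reference while the count
 * is above the given bound, so it cannot resurrect an object whose last
 * reference is already being dropped.  Assuming a hypothetical object type
 * with an f_refcnt field:
 *
 *	struct foo *
 *	foo_tryref(struct foo *fp)
 *	{
 *		if (!refcount_acquire_if_gt(&fp->f_refcnt, 0))
 *			return (NULL);
 *		return (fp);
 *	}
 */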

static __inline __result_use_check bool
refcount_acquire_if_not_zero(volatile u_int *count)
{

	return (refcount_acquire_if_gt(count, 0));
}

static __inline bool
refcount_releasen(volatile u_int *count, u_int n)
{
	u_int old;

	KASSERT(n < REFCOUNT_SATURATION_VALUE / 2,
	    ("refcount_releasen: n=%u too large", n));

	atomic_thread_fence_rel();
	old = atomic_fetchadd_int(count, -n);
	if (__predict_false(old < n || REFCOUNT_SATURATED(old))) {
		_refcount_update_saturated(count);
		return (false);
	}
	if (old > n)
		return (false);

	/*
	 * Last reference.  Signal the user to call the destructor.
	 *
	 * Ensure that the destructor sees all updates. This synchronizes with
	 * release fences from all routines which drop the count.
	 */
	atomic_thread_fence_acq();
	return (true);
}

static __inline bool
refcount_release(volatile u_int *count)
{

	return (refcount_releasen(count, 1));
}
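
/*
 * Illustrative usage sketch, added for clarity (not part of the original
 * header): the boolean result of refcount_release() and refcount_releasen()
 * tells the caller it dropped the last reference and is now responsible for
 * destruction.  Assuming a hypothetical object with an f_refcnt field and a
 * foo_destroy() helper:
 *
 *	void
 *	foo_rele(struct foo *fp)
 *	{
 *		if (refcount_release(&fp->f_refcnt))
 *			foo_destroy(fp);
 *	}
 *
 * The release fence taken before the decrement and the acquire fence taken
 * by the final releaser ensure that foo_destroy() observes all stores made
 * while other threads still held references.
 */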

#define	_refcount_release_if_cond(cond, name)				\
static __inline __result_use_check bool					\
_refcount_release_if_##name(volatile u_int *count, u_int n)		\
{									\
	u_int old;							\
									\
	KASSERT(n > 0, ("%s: zero increment", __func__));		\
	old = atomic_load_int(count);					\
	for (;;) {							\
		if (!(cond))						\
			return (false);					\
		if (__predict_false(REFCOUNT_SATURATED(old)))		\
			return (false);					\
		if (atomic_fcmpset_rel_int(count, &old, old - 1))	\
			return (true);					\
	}								\
}
_refcount_release_if_cond(old > n, gt)
_refcount_release_if_cond(old == n, eq)
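
/*
 * Descriptive note, added for clarity (not part of the original header): the
 * macro above generates _refcount_release_if_gt() and
 * _refcount_release_if_eq(), which decrement the count with release
 * semantics only while the supplied condition (evaluated against the loaded
 * value "old" and the argument "n") holds and the count is not saturated;
 * otherwise they return false without modifying the count.
 */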

static __inline __result_use_check bool
refcount_release_if_gt(volatile u_int *count, u_int n)
{

	return (_refcount_release_if_gt(count, n));
}

static __inline __result_use_check bool
refcount_release_if_last(volatile u_int *count)
{

	if (_refcount_release_if_eq(count, 1)) {
		/* See the comment in refcount_releasen(). */
		atomic_thread_fence_acq();
		return (true);
	}
	return (false);
}

static __inline __result_use_check bool
refcount_release_if_not_last(volatile u_int *count)
{

	return (_refcount_release_if_gt(count, 1));
}

#endif /* !__SYS_REFCOUNT_H__ */