/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_RCUREF_H
#define _LINUX_RCUREF_H

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/limits.h>
#include <linux/lockdep.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>

#define RCUREF_ONEREF		0x00000000U
#define RCUREF_MAXREF		0x7FFFFFFFU
#define RCUREF_SATURATED	0xA0000000U
#define RCUREF_RELEASED		0xC0000000U
#define RCUREF_DEAD		0xE0000000U
#define RCUREF_NOREF		0xFFFFFFFFU

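/*
 * Note: The counter scheme is biased by one: a stored value of
 * RCUREF_ONEREF (0x0) denotes a single held reference, which is why
 * rcuref_init() below stores 'cnt - 1' and rcuref_read() returns
 * 'c + 1'. Values at or above RCUREF_RELEASED read as zero references.
 * See lib/rcuref.c for the full description of the zones.
 */
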
/**
 * rcuref_init - Initialize a rcuref reference count with the given reference count
 * @ref:	Pointer to the reference count
 * @cnt:	The initial reference count, typically '1'
 */
static inline void rcuref_init(rcuref_t *ref, unsigned int cnt)
{
	atomic_set(&ref->refcnt, cnt - 1);
}
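
/*
 * Example (a minimal sketch; 'struct gadget' and the surrounding code
 * are hypothetical, not part of this API):
 *
 *	struct gadget {
 *		rcuref_t ref;
 *		struct list_head node;
 *		struct rcu_head rcu;
 *	};
 *
 *	struct gadget *g = kzalloc(sizeof(*g), GFP_KERNEL);
 *
 *	if (g)
 *		rcuref_init(&g->ref, 1);
 *
 * This sets up the count with one initial reference held by 'g'.
 */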

/**
 * rcuref_read - Read the number of held references of a rcuref
 * @ref:	Pointer to the reference count
 *
 * Return: The number of held references (0 ... N)
 */
static inline unsigned int rcuref_read(rcuref_t *ref)
{
	unsigned int c = atomic_read(&ref->refcnt);

	/* Return 0 if within the DEAD zone. */
	return c >= RCUREF_RELEASED ? 0 : c + 1;
}
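
/*
 * For example, after rcuref_init(&g->ref, 1) above, the stored value is
 * RCUREF_ONEREF (0) and rcuref_read(&g->ref) returns 1; once the last
 * reference has been dropped, the counter sits in the dead zone and
 * rcuref_read() returns 0.
 */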

extern __must_check bool rcuref_get_slowpath(rcuref_t *ref);

/**
 * rcuref_get - Acquire one reference on a rcuref reference count
 * @ref:	Pointer to the reference count
 *
 * Similar to atomic_inc_not_zero() but saturates at RCUREF_MAXREF.
 *
 * Provides no memory ordering, it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See documentation in lib/rcuref.c.
 *
 * Return:
 *	False if the attempt to acquire a reference failed. This happens
 *	when the last reference has already been put.
 *
 *	True if a reference was successfully acquired
 */
static inline __must_check bool rcuref_get(rcuref_t *ref)
{
	/*
	 * Unconditionally increase the reference count. The saturation and
	 * dead zones provide enough tolerance for this.
	 */
	if (likely(!atomic_add_negative_relaxed(1, &ref->refcnt)))
		return true;

	/* Handle the cases inside the saturation and dead zones */
	return rcuref_get_slowpath(ref);
}
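
/*
 * Example: an RCU-protected lookup which takes a reference on success.
 * A minimal sketch; gadget_lookup(), the 'gadgets' list and struct
 * gadget are hypothetical:
 *
 *	struct gadget *gadget_lookup(int id)
 *	{
 *		struct gadget *g;
 *
 *		rcu_read_lock();
 *		list_for_each_entry_rcu(g, &gadgets, node) {
 *			if (g->id == id && rcuref_get(&g->ref)) {
 *				rcu_read_unlock();
 *				return g;
 *			}
 *		}
 *		rcu_read_unlock();
 *		return NULL;
 *	}
 *
 * The RCU read side keeps the object memory stable while rcuref_get()
 * decides whether the last reference has already been dropped.
 */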

extern __must_check bool rcuref_put_slowpath(rcuref_t *ref);

/*
 * Internal helper. Do not invoke directly.
 */
static __always_inline __must_check bool __rcuref_put(rcuref_t *ref)
{
	RCU_LOCKDEP_WARN(!rcu_read_lock_held() && preemptible(),
			 "suspicious rcuref_put_rcusafe() usage");
	/*
	 * Unconditionally decrease the reference count. The saturation and
	 * dead zones provide enough tolerance for this.
	 */
	if (likely(!atomic_add_negative_release(-1, &ref->refcnt)))
		return false;

	/*
	 * Handle the last reference drop and cases inside the saturation
	 * and dead zones.
	 */
	return rcuref_put_slowpath(ref);
}

/**
 * rcuref_put_rcusafe -- Release one reference on a rcuref reference count, RCU safe
 * @ref:	Pointer to the reference count
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Can be invoked from contexts which guarantee that no grace period can
 * happen which would free the object concurrently if the decrement drops
 * the last reference and the slowpath races against a concurrent get() and
 * put() pair. rcu_read_lock()'ed and atomic contexts qualify.
 *
 * Return:
 *	True if this was the last reference with no future references
 *	possible. This signals the caller that it can safely release the
 *	object which is protected by the reference counter.
 *
 *	False if there are still active references or the put() raced
 *	with a concurrent get()/put() pair. Caller is not allowed to
 *	release the protected object.
 */
static inline __must_check bool rcuref_put_rcusafe(rcuref_t *ref)
{
	return __rcuref_put(ref);
}
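
/*
 * Example: dropping a reference from within an RCU read-side critical
 * section (a sketch, reusing the hypothetical struct gadget from above;
 * gadget_free_rcu() is a hypothetical rcu_callback_t which frees the
 * object after a grace period):
 *
 *	rcu_read_lock();
 *	if (rcuref_put_rcusafe(&g->ref))
 *		call_rcu(&g->rcu, gadget_free_rcu);
 *	rcu_read_unlock();
 */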

/**
 * rcuref_put -- Release one reference on a rcuref reference count
 * @ref:	Pointer to the reference count
 *
 * Can be invoked from any context.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return:
 *
 *	True if this was the last reference with no future references
 *	possible. This signals the caller that it can safely schedule the
 *	object, which is protected by the reference counter, for
 *	destruction.
 *
 *	False if there are still active references or the put() raced
 *	with a concurrent get()/put() pair. Caller is not allowed to
 *	destroy the protected object.
 */
static inline __must_check bool rcuref_put(rcuref_t *ref)
{
	bool released;

	preempt_disable();
	released = __rcuref_put(ref);
	preempt_enable();
	return released;
}
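
/*
 * Example: the common release path, callable from any context (a sketch,
 * again using the hypothetical struct gadget):
 *
 *	void gadget_put(struct gadget *g)
 *	{
 *		if (rcuref_put(&g->ref))
 *			kfree_rcu(g, rcu);
 *	}
 *
 * Freeing via kfree_rcu() defers the actual kfree() past a grace
 * period, so concurrent RCU readers which still see 'g' cannot touch
 * freed memory.
 */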

#endif