xref: /linux/include/linux/rculist_nulls.h (revision 8f7aa3d3c7323f4ca2768a9e74ebbe359c4f8f88)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_RCULIST_NULLS_H
3 #define _LINUX_RCULIST_NULLS_H
4 
5 #ifdef __KERNEL__
6 
7 /*
8  * RCU-protected list version
9  */
10 #include <linux/list_nulls.h>
11 #include <linux/rcupdate.h>
12 
13 /**
14  * hlist_nulls_del_init_rcu - deletes entry from hash list with re-initialization
15  * @n: the element to delete from the hash list.
16  *
17  * Note: hlist_nulls_unhashed() on the node returns true after this. It is
18  * useful for RCU based read lockfree traversal if the writer side
19  * must know if the list entry is still hashed or already unhashed.
20  *
21  * In particular, it means that we can not poison the forward pointers
22  * that may still be used for walking the hash list and we can only
23  * zero the pprev pointer so list_unhashed() will return true after
24  * this.
25  *
26  * The caller must take whatever precautions are necessary (such as
27  * holding appropriate locks) to avoid racing with another
28  * list-mutation primitive, such as hlist_nulls_add_head_rcu() or
29  * hlist_nulls_del_rcu(), running on this same list.  However, it is
30  * perfectly legal to run concurrently with the _rcu list-traversal
31  * primitives, such as hlist_nulls_for_each_entry_rcu().
32  */
33 static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
34 {
	/* No-op if the node is already unhashed. */
35 	if (!hlist_nulls_unhashed(n)) {
36 		__hlist_nulls_del(n);
		/*
		 * Zero (rather than poison) pprev so that a subsequent
		 * hlist_nulls_unhashed() reports true; n->next is left
		 * intact for concurrent RCU readers still walking past n.
		 */
37 		WRITE_ONCE(n->pprev, NULL);
38 	}
39 }
40 
41 /**
42  * hlist_nulls_first_rcu - returns the first element of the hash list.
43  * @head: the head of the list.
44  */
/*
 * Exposes head->first as an __rcu-annotated lvalue so it can be used with
 * the rcu_dereference()/rcu_assign_pointer() family of accessors.
 */
45 #define hlist_nulls_first_rcu(head) \
46 	(*((struct hlist_nulls_node __rcu __force **)&(head)->first))
47 
48 /**
49  * hlist_nulls_next_rcu - returns the element of the list after @node.
50  * @node: element of the list.
51  */
/* As with hlist_nulls_first_rcu(): exposes node->next as an __rcu lvalue. */
52 #define hlist_nulls_next_rcu(node) \
53 	(*((struct hlist_nulls_node __rcu __force **)&(node)->next))
54 
55 /**
56  * hlist_nulls_pprev_rcu - returns the dereferenced pprev of @node.
57  * @node: element of the list.
58  */
/*
 * Note: unlike the two macros above, this dereferences node->pprev itself,
 * yielding the predecessor's next slot (or the list head) as an __rcu lvalue.
 */
59 #define hlist_nulls_pprev_rcu(node) \
60 	(*((struct hlist_nulls_node __rcu __force **)(node)->pprev))
61 
62 /**
63  * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization
64  * @n: the element to delete from the hash list.
65  *
66  * Note: hlist_nulls_unhashed() on entry does not return true after this,
67  * the entry is in an undefined state. It is useful for RCU based
68  * lockfree traversal.
69  *
70  * In particular, it means that we can not poison the forward
71  * pointers that may still be used for walking the hash list.
72  *
73  * The caller must take whatever precautions are necessary
74  * (such as holding appropriate locks) to avoid racing
75  * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
76  * or hlist_nulls_del_rcu(), running on this same list.
77  * However, it is perfectly legal to run concurrently with
78  * the _rcu list-traversal primitives, such as
79  * hlist_nulls_for_each_entry().
80  */
81 static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
82 {
83 	__hlist_nulls_del(n);
	/*
	 * pprev is never followed by RCU readers, so it may be poisoned;
	 * n->next must stay valid for readers still walking through n.
	 */
84 	WRITE_ONCE(n->pprev, LIST_POISON2);
85 }
86 
87 /**
88  * hlist_nulls_add_head_rcu
89  * @n: the element to add to the hash list.
90  * @h: the list to add to.
91  *
92  * Description:
93  * Adds the specified element to the specified hlist_nulls,
94  * while permitting racing traversals.
95  *
96  * The caller must take whatever precautions are necessary
97  * (such as holding appropriate locks) to avoid racing
98  * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
99  * or hlist_nulls_del_rcu(), running on this same list.
100  * However, it is perfectly legal to run concurrently with
101  * the _rcu list-traversal primitives, such as
102  * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
103  * problems on Alpha CPUs.  Regardless of the type of CPU, the
104  * list-traversal primitive must be guarded by rcu_read_lock().
105  */
106 static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
107 					struct hlist_nulls_head *h)
108 {
	/* Plain read: we are on the write side, serialized by the caller. */
109 	struct hlist_nulls_node *first = h->first;
110 
	/* Fully initialize n before it becomes visible to readers. */
111 	WRITE_ONCE(n->next, first);
112 	WRITE_ONCE(n->pprev, &h->first);
	/*
	 * rcu_assign_pointer() orders the stores above before the
	 * publication of n at the head of the list.
	 */
113 	rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
	/* On an empty list, first is a nulls marker and has no pprev. */
114 	if (!is_a_nulls(first))
115 		WRITE_ONCE(first->pprev, &n->next);
116 }
117 
118 /**
119  * hlist_nulls_add_tail_rcu
120  * @n: the element to add to the hash list.
121  * @h: the list to add to.
122  *
123  * Description:
124  * Adds the specified element to the specified hlist_nulls,
125  * while permitting racing traversals.
126  *
127  * The caller must take whatever precautions are necessary
128  * (such as holding appropriate locks) to avoid racing
129  * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
130  * or hlist_nulls_del_rcu(), running on this same list.
131  * However, it is perfectly legal to run concurrently with
132  * the _rcu list-traversal primitives, such as
133  * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
134  * problems on Alpha CPUs.  Regardless of the type of CPU, the
135  * list-traversal primitive must be guarded by rcu_read_lock().
136  */
137 static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
138 					    struct hlist_nulls_head *h)
139 {
140 	struct hlist_nulls_node *i, *last = NULL;
141 
142 	/* Note: write side code, so rcu accessors are not needed. */
143 	for (i = h->first; !is_a_nulls(i); i = i->next)
144 		last = i;
145 
146 	if (last) {
		/* n->next inherits the tail's nulls marker. */
147 		WRITE_ONCE(n->next, last->next);
148 		WRITE_ONCE(n->pprev, &last->next);
		/* Publish n only after its own fields are initialized. */
149 		rcu_assign_pointer(hlist_nulls_next_rcu(last), n);
150 	} else {
		/* Empty list: tail insertion degenerates to head insertion. */
151 		hlist_nulls_add_head_rcu(n, h);
152 	}
153 }
154 
155 /* after that hlist_nulls_del will work */
156 static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n)
157 {
	/*
	 * Make n look like the sole entry of a nulls-terminated list
	 * (pprev pointing at its own next slot, next holding a nulls
	 * marker), so a later hlist_nulls_del() on it is well defined.
	 */
158 	WRITE_ONCE(n->pprev, &n->next);
159 	WRITE_ONCE(n->next, (struct hlist_nulls_node *)NULLS_MARKER(NULL));
160 }
161 
162 /**
163  * hlist_nulls_replace_rcu - replace an old entry by a new one
164  * @old: the element to be replaced
165  * @new: the new element to insert
166  *
167  * Description:
168  * Replace the old entry with the new one in a RCU-protected hlist_nulls, while
169  * permitting racing traversals.
170  *
171  * The caller must take whatever precautions are necessary (such as holding
172  * appropriate locks) to avoid racing with another list-mutation primitive, such
173  * as hlist_nulls_add_head_rcu() or hlist_nulls_del_rcu(), running on this same
174  * list.  However, it is perfectly legal to run concurrently with the _rcu
175  * list-traversal primitives, such as hlist_nulls_for_each_entry_rcu().
176  */
177 static inline void hlist_nulls_replace_rcu(struct hlist_nulls_node *old,
178 					   struct hlist_nulls_node *new)
179 {
	/* Plain read: write side, serialized by the caller. */
180 	struct hlist_nulls_node *next = old->next;
181 
	/* Fully initialize new before it is published. */
182 	WRITE_ONCE(new->next, next);
183 	WRITE_ONCE(new->pprev, old->pprev);
	/*
	 * Swing the predecessor's pointer (list head or prev->next) to new;
	 * rcu_assign_pointer() orders the initialization stores above first.
	 * Readers already at old still see a valid old->next.
	 */
184 	rcu_assign_pointer(hlist_nulls_pprev_rcu(new), new);
	/* next is a nulls marker (no pprev) when old was the last entry. */
185 	if (!is_a_nulls(next))
186 		WRITE_ONCE(next->pprev, &new->next);
187 }
188 
189 /**
190  * hlist_nulls_replace_init_rcu - replace an old entry by a new one and
191  * initialize the old
192  * @old: the element to be replaced
193  * @new: the new element to insert
194  *
195  * Description:
196  * Replace the old entry with the new one in a RCU-protected hlist_nulls, while
197  * permitting racing traversals, and reinitialize the old entry.
198  *
199  * Note: @old must be hashed.
200  *
201  * The caller must take whatever precautions are necessary (such as holding
202  * appropriate locks) to avoid racing with another list-mutation primitive, such
203  * as hlist_nulls_add_head_rcu() or hlist_nulls_del_rcu(), running on this same
204  * list. However, it is perfectly legal to run concurrently with the _rcu
205  * list-traversal primitives, such as hlist_nulls_for_each_entry_rcu().
206  */
207 static inline void hlist_nulls_replace_init_rcu(struct hlist_nulls_node *old,
208 						struct hlist_nulls_node *new)
209 {
210 	hlist_nulls_replace_rcu(old, new);
	/*
	 * Zero pprev so hlist_nulls_unhashed(old) now returns true;
	 * old->next is left intact for concurrent RCU readers.
	 */
211 	WRITE_ONCE(old->pprev, NULL);
212 }
213 
214 /**
215  * hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
216  * @tpos:	the type * to use as a loop cursor.
217  * @pos:	the &struct hlist_nulls_node to use as a loop cursor.
218  * @head:	the head of the list.
219  * @member:	the name of the hlist_nulls_node within the struct.
220  *
221  * The barrier() is needed to make sure compiler doesn't cache first element [1],
222  * as this loop can be restarted [2]
223  * [1] Documentation/memory-barriers.txt around line 1533
224  * [2] Documentation/RCU/rculist_nulls.rst around line 146
 *
 * The loop ends when is_a_nulls(pos) is true, i.e. on the list's nulls
 * marker rather than a plain NULL, so the caller can inspect the marker
 * value after the loop to detect a chain switch and restart [2].
225  */
226 #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member)			\
227 	for (({barrier();}),							\
228 	     pos = rcu_dereference_raw(hlist_nulls_first_rcu(head));		\
229 		(!is_a_nulls(pos)) &&						\
230 		({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \
231 		pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)))
232 
233 /**
234  * hlist_nulls_for_each_entry_safe -
235  *   iterate over list of given type safe against removal of list entry
236  * @tpos:	the type * to use as a loop cursor.
237  * @pos:	the &struct hlist_nulls_node to use as a loop cursor.
238  * @head:	the head of the list.
239  * @member:	the name of the hlist_nulls_node within the struct.
 *
 * Unlike hlist_nulls_for_each_entry_rcu(), @pos is advanced to the next
 * node in the loop condition, before the body runs, so the body may
 * remove the entry @tpos currently points at.
240  */
241 #define hlist_nulls_for_each_entry_safe(tpos, pos, head, member)		\
242 	for (({barrier();}),							\
243 	     pos = rcu_dereference_raw(hlist_nulls_first_rcu(head));		\
244 		(!is_a_nulls(pos)) &&						\
245 		({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member);	\
246 		   pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos)); 1; });)
247 #endif
248 #endif
249