xref: /linux/include/linux/rculist.h (revision 06bc7ff0a1e0f2b0102e1314e3527a7ec0997851)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_RCULIST_H
3 #define _LINUX_RCULIST_H
4 
5 #ifdef __KERNEL__
6 
7 /*
8  * RCU-protected list version
9  */
10 #include <linux/list.h>
11 #include <linux/rcupdate.h>
12 
/*
 * INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
 * @list: list to be initialized
 *
 * You should instead use INIT_LIST_HEAD() for normal initialization and
 * cleanup tasks, when readers have no access to the list being initialized.
 * However, if the list being initialized is visible to readers, you
 * need to keep the compiler from being too mischievous.
 */
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
{
	/*
	 * WRITE_ONCE() keeps the compiler from tearing or fusing these
	 * stores, which concurrent lockless readers might otherwise observe.
	 */
	WRITE_ONCE(list->next, list);
	WRITE_ONCE(list->prev, list);
}
27 
/*
 * Return the ->next pointer of a list_head in an rcu safe
 * way; we must not access it directly.  The __rcu cast makes the
 * result usable with rcu_dereference()/rcu_assign_pointer() and
 * visible to sparse's address-space checking.
 */
#define list_next_rcu(list)	(*((struct list_head __rcu **)(&(list)->next)))
/*
 * Return the ->prev pointer of a list_head in an rcu safe way. Don't
 * access it directly.
 *
 * Any list traversed with list_bidir_prev_rcu() must never use
 * list_del_rcu().  Doing so will poison the ->prev pointer that
 * list_bidir_prev_rcu() relies on, which will result in segfaults.
 * To prevent these segfaults, use list_bidir_del_rcu() instead
 * of list_del_rcu().
 */
#define list_bidir_prev_rcu(list) (*((struct list_head __rcu **)(&(list)->prev)))
44 
/**
 * list_for_each_rcu - Iterate over a list in an RCU-safe fashion
 * @pos:	the &struct list_head to use as a loop cursor.
 * @head:	the head for your list.
 *
 * Each ->next pointer is fetched with rcu_dereference(), so the
 * traversal may run concurrently with _rcu list-mutation primitives.
 */
#define list_for_each_rcu(pos, head)		  \
	for (pos = rcu_dereference((head)->next); \
	     !list_is_head(pos, (head)); \
	     pos = rcu_dereference(pos->next))
54 
/**
 * list_tail_rcu - returns the prev pointer of the head of the list
 * @head: the head of the list
 *
 * Note: This should only be used with the list header, and even then
 * only if list_del() and similar primitives are not also used on the
 * list header.
 */
#define list_tail_rcu(head)	(*((struct list_head __rcu **)(&(head)->prev)))
64 
/*
 * Check during list traversal that we are within an RCU reader
 */

/*
 * Expands to nothing; passing more than one trailing argument to the
 * _rcu traversal macros makes this fail to compile, enforcing at most
 * a single optional lockdep condition.
 */
#define check_arg_count_one(dummy)

#ifdef CONFIG_PROVE_RCU_LIST
/* Warn unless @cond holds or some flavor of RCU reader lock is held. */
#define __list_check_rcu(dummy, cond, extra...)				\
	({								\
	check_arg_count_one(extra);					\
	RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(),		\
			 "RCU-list traversed in non-reader section!");	\
	})

/* SRCU variant: the caller-supplied @cond is the only protection checked. */
#define __list_check_srcu(cond)					 \
	({								 \
	RCU_LOCKDEP_WARN(!(cond),					 \
		"RCU-list traversed without holding the required lock!");\
	})
#else
/* Checking disabled: still validate the argument count at compile time. */
#define __list_check_rcu(dummy, cond, extra...)				\
	({ check_arg_count_one(extra); })

#define __list_check_srcu(cond) ({ })
#endif
90 
/*
 * Insert a new entry between two known consecutive entries.
 *
 * This is only for internal list manipulation where we know
 * the prev/next entries already!
 */
static inline void __list_add_rcu(struct list_head *new,
		struct list_head *prev, struct list_head *next)
{
	if (!__list_add_valid(new, prev, next))
		return;

	/* Fully initialize @new before it is published to readers. */
	new->next = next;
	new->prev = prev;
	/*
	 * rcu_assign_pointer() orders the two initializing stores above
	 * before @new becomes reachable via prev->next, so lockless
	 * readers never see a partially initialized entry.
	 */
	rcu_assign_pointer(list_next_rcu(prev), new);
	/* Forward RCU traversal follows only ->next, so a plain store suffices. */
	next->prev = new;
}
108 
/**
 * list_add_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it after
 *
 * Insert a new entry after the specified head.
 * This is good for implementing stacks.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
{
	__list_add_rcu(new, head, head->next);
}
129 
/**
 * list_add_tail_rcu - add a new entry to rcu-protected list
 * @new: new entry to be added
 * @head: list head to add it before
 *
 * Insert a new entry before the specified head.
 * This is useful for implementing queues.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_add_tail_rcu()
 * or list_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 */
static inline void list_add_tail_rcu(struct list_head *new,
					struct list_head *head)
{
	__list_add_rcu(new, head->prev, head);
}
151 
/**
 * list_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * Note: list_empty() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as list_del_rcu()
 * or list_add_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_del_rcu(struct list_head *entry)
{
	__list_del_entry(entry);
	/*
	 * Poison only ->prev: readers may still legitimately follow
	 * ->next, but any backwards traversal of a deleted entry is a
	 * bug we want to fault on (see list_bidir_prev_rcu()).
	 */
	entry->prev = LIST_POISON2;
}
181 
/**
 * list_bidir_del_rcu - deletes entry from list without re-initialization
 * @entry: the element to delete from the list.
 *
 * In contrast to list_del_rcu() doesn't poison the prev pointer thus
 * allowing backwards traversal via list_bidir_prev_rcu().
 *
 * Note: list_empty() on entry does not return true after this because
 * the entry is in a special undefined state that permits RCU-based
 * lockfree reverse traversal. In particular this means that we can not
 * poison the forward and backwards pointers that may still be used for
 * walking the list.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another list-mutation
 * primitive, such as list_bidir_del_rcu() or list_add_rcu(), running on
 * this same list. However, it is perfectly legal to run concurrently
 * with the _rcu list-traversal primitives, such as
 * list_for_each_entry_rcu().
 *
 * Note that list_del_rcu() and list_bidir_del_rcu() must not be used on
 * the same list.
 *
 * Note that the caller is not permitted to immediately free
 * the newly deleted entry.  Instead, either synchronize_rcu()
 * or call_rcu() must be used to defer freeing until an RCU
 * grace period has elapsed.
 */
static inline void list_bidir_del_rcu(struct list_head *entry)
{
	/* Unlink only; both ->next and ->prev of @entry remain valid. */
	__list_del_entry(entry);
}
214 
/**
 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on the node returns true after this. It is
 * useful for RCU based read lockfree traversal if the writer side
 * must know if the list entry is still hashed or already unhashed.
 *
 * In particular, it means that we can not poison the forward pointers
 * that may still be used for walking the hash list and we can only
 * zero the pprev pointer so list_unhashed() will return true after
 * this.
 *
 * The caller must take whatever precautions are necessary (such as
 * holding appropriate locks) to avoid racing with another
 * list-mutation primitive, such as hlist_add_head_rcu() or
 * hlist_del_rcu(), running on this same list.  However, it is
 * perfectly legal to run concurrently with the _rcu list-traversal
 * primitives, such as hlist_for_each_entry_rcu().
 */
static inline void hlist_del_init_rcu(struct hlist_node *n)
{
	if (!hlist_unhashed(n)) {
		__hlist_del(n);
		/* NULL (not poison) so hlist_unhashed() reports true. */
		WRITE_ONCE(n->pprev, NULL);
	}
}
242 
/**
 * list_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically from
 * the perspective of concurrent readers.  It is the caller's responsibility
 * to synchronize with concurrent updaters, if any.
 *
 * Note: @old should not be empty.
 */
static inline void list_replace_rcu(struct list_head *old,
				struct list_head *new)
{
	/* Initialize @new first; rcu_assign_pointer() then publishes it. */
	new->next = old->next;
	new->prev = old->prev;
	rcu_assign_pointer(list_next_rcu(new->prev), new);
	new->next->prev = new;
	/* Poison @old's ->prev; its ->next stays valid for in-flight readers. */
	old->prev = LIST_POISON2;
}
263 
/*
 * Splice the non-empty list [@first..@last] between @prev and @next.
 * All internal links are wired up before the single rcu_assign_pointer()
 * makes the spliced chain reachable from @prev for lockless readers.
 */
static inline void __list_splice_rcu(struct list_head *list,
				     struct list_head *prev,
				     struct list_head *next)
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;

	last->next = next;
	first->prev = prev;
	next->prev = last;
	/* Publication point: readers now see the spliced entries. */
	rcu_assign_pointer(list_next_rcu(prev), first);
}
276 
/**
 * list_splice_rcu - splice a non-RCU list into an RCU-protected list,
 *                   designed for stacks.
 * @list:	the non RCU-protected list to splice
 * @head:	the place in the existing RCU-protected list to splice
 *
 * The list pointed to by @head can be RCU-read traversed concurrently with
 * this function.
 */
static inline void list_splice_rcu(struct list_head *list,
				   struct list_head *head)
{
	if (!list_empty(list))
		__list_splice_rcu(list, head, head->next);
}
292 
/**
 * __list_splice_init_rcu - join an RCU-protected list into an existing list.
 * @list:	the RCU-protected list to splice
 * @prev:	points to the last element of the existing list
 * @next:	points to the first element of the existing list
 * @sync:	synchronize_rcu, synchronize_rcu_expedited, ...
 *
 * The list pointed to by @prev and @next can be RCU-read traversed
 * concurrently with this function.
 *
 * Note that this function blocks.
 *
 * Important note: the caller must take whatever action is necessary to prevent
 * any other updates to the existing list.  In principle, it is possible to
 * modify the list as soon as sync() begins execution. If this sort of thing
 * becomes necessary, an alternative version based on call_rcu() could be
 * created.  But only if -really- needed -- there is no shortage of RCU API
 * members.
 */
static inline void __list_splice_init_rcu(struct list_head *list,
					  struct list_head *prev,
					  struct list_head *next,
					  void (*sync)(void))
{
	struct list_head *first = list->next;
	struct list_head *last = list->prev;

	/*
	 * Detach the source list's header by re-initializing it.  RCU
	 * readers still have access to this header, so we must use
	 * INIT_LIST_HEAD_RCU() instead of INIT_LIST_HEAD().
	 */

	INIT_LIST_HEAD_RCU(list);

	/*
	 * At this point, the list body still points to the source list.
	 * Wait for any readers to finish using the list before splicing
	 * the list body into the new list.  Any new readers will see
	 * an empty list.
	 */

	sync();
	ASSERT_EXCLUSIVE_ACCESS(*first);
	ASSERT_EXCLUSIVE_ACCESS(*last);

	/*
	 * Readers are finished with the source list, so perform splice.
	 * The order is important if the new list is global and accessible
	 * to concurrent RCU readers.  Note that RCU readers are not
	 * permitted to traverse the prev pointers without excluding
	 * this function.
	 */

	last->next = next;
	/* Publish the chain at @prev before fixing up the ->prev links. */
	rcu_assign_pointer(list_next_rcu(prev), first);
	first->prev = prev;
	next->prev = last;
}
352 
/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list,
 *                        designed for stacks.
 * @list:	the RCU-protected list to splice
 * @head:	the place in the existing list to splice the first list into
 * @sync:	synchronize_rcu, synchronize_rcu_expedited, ...
 *
 * Note: blocks in @sync; see __list_splice_init_rcu() for the caveats.
 */
static inline void list_splice_init_rcu(struct list_head *list,
					struct list_head *head,
					void (*sync)(void))
{
	if (!list_empty(list))
		__list_splice_init_rcu(list, head, head->next, sync);
}
367 
/**
 * list_splice_tail_init_rcu - splice an RCU-protected list into an existing
 *                             list, designed for queues.
 * @list:	the RCU-protected list to splice
 * @head:	the place in the existing list to splice the first list into
 * @sync:	synchronize_rcu, synchronize_rcu_expedited, ...
 *
 * Note: blocks in @sync; see __list_splice_init_rcu() for the caveats.
 */
static inline void list_splice_tail_init_rcu(struct list_head *list,
					     struct list_head *head,
					     void (*sync)(void))
{
	if (!list_empty(list))
		__list_splice_init_rcu(list, head->prev, head, sync);
}
382 
/**
 * list_entry_rcu - get the struct for this entry
 * @ptr:        the &struct list_head pointer.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 * READ_ONCE() prevents the compiler from refetching @ptr.
 */
#define list_entry_rcu(ptr, type, member) \
	container_of(READ_ONCE(ptr), type, member)
394 
395 /*
396  * Where are list_empty_rcu() and list_first_entry_rcu()?
397  *
398  * They do not exist because they would lead to subtle race conditions:
399  *
400  * if (!list_empty_rcu(mylist)) {
401  *	struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
402  *	do_something(bar);
403  * }
404  *
405  * The list might be non-empty when list_empty_rcu() checks it, but it
406  * might have become empty by the time that list_first_entry_rcu() rereads
407  * the ->next pointer, which would result in a SEGV.
408  *
409  * When not using RCU, it is OK for list_first_entry() to re-read that
410  * pointer because both functions should be protected by some lock that
411  * blocks writers.
412  *
413  * When using RCU, list_empty() uses READ_ONCE() to fetch the
414  * RCU-protected ->next pointer and then compares it to the address of the
415  * list head.  However, it neither dereferences this pointer nor provides
416  * this pointer to its caller.  Thus, READ_ONCE() suffices (that is,
417  * rcu_dereference() is not needed), which means that list_empty() can be
418  * used anywhere you would want to use list_empty_rcu().  Just don't
419  * expect anything useful to happen if you do a subsequent lockless
420  * call to list_first_entry_rcu()!!!
421  *
422  * See list_first_or_null_rcu for an alternative.
423  */
424 
/**
 * list_first_or_null_rcu - get the first element from a list
 * @ptr:        the list head to take the element from.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * Note that if the list is empty, it returns NULL.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_first_or_null_rcu(ptr, type, member) \
({ \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	/* ->next is sampled once, so emptiness check and use agree. */ \
	likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
})
442 
/**
 * list_next_or_null_rcu - get the next element from a list
 * @head:	the head for the list.
 * @ptr:        the list head to take the next element from.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * Note that if the ptr is at the end of the list, NULL is returned.
 *
 * This primitive may safely run concurrently with the _rcu list-mutation
 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
 */
#define list_next_or_null_rcu(head, ptr, type, member) \
({ \
	struct list_head *__head = (head); \
	struct list_head *__ptr = (ptr); \
	struct list_head *__next = READ_ONCE(__ptr->next); \
	likely(__next != __head) ? list_entry_rcu(__next, type, \
						  member) : NULL; \
})
463 
/**
 * list_for_each_entry_rcu	-	iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 * @cond:	optional lockdep expression if called from non-RCU protection.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * The optional @cond is checked once, before the first iteration, via
 * __list_check_rcu(); the trailing 0 supplies the default when @cond
 * is omitted.
 */
#define list_for_each_entry_rcu(pos, head, member, cond...)		\
	for (__list_check_rcu(dummy, ## cond, 0),			\
	     pos = list_entry_rcu((head)->next, typeof(*pos), member);	\
		&pos->member != (head);					\
		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
480 
/**
 * list_for_each_entry_srcu	-	iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 * @cond:	lockdep expression for the lock required to traverse the list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by srcu_read_lock().
 * The lockdep expression srcu_read_lock_held() can be passed as the
 * cond argument from read side.  Unlike list_for_each_entry_rcu(),
 * @cond is mandatory and is checked once before the first iteration.
 */
#define list_for_each_entry_srcu(pos, head, member, cond)		\
	for (__list_check_srcu(cond),					\
	     pos = list_entry_rcu((head)->next, typeof(*pos), member);	\
		&pos->member != (head);					\
		pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
499 
/**
 * list_entry_lockless - get the struct for this entry
 * @ptr:        the &struct list_head pointer.
 * @type:       the type of the struct this is embedded in.
 * @member:     the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu
 * list-mutation primitives such as list_add_rcu(), but requires some
 * implicit RCU read-side guarding.  One example is running within a special
 * exception-time environment where preemption is disabled and where lockdep
 * cannot be invoked.  Another example is when items are added to the list,
 * but never deleted.
 */
#define list_entry_lockless(ptr, type, member) \
	container_of((typeof(ptr))READ_ONCE(ptr), type, member)
515 
/**
 * list_for_each_entry_lockless - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 *
 * This primitive may safely run concurrently with the _rcu
 * list-mutation primitives such as list_add_rcu(), but requires some
 * implicit RCU read-side guarding.  One example is running within a special
 * exception-time environment where preemption is disabled and where lockdep
 * cannot be invoked.  Another example is when items are added to the list,
 * but never deleted.  No lockdep checking is performed.
 */
#define list_for_each_entry_lockless(pos, head, member) \
	for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = list_entry_lockless(pos->member.next, typeof(*pos), member))
533 
/**
 * list_for_each_entry_continue_rcu - continue iteration over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position which must have been in the list when the RCU read
 * lock was taken.
 * This would typically require either that you obtained the node from a
 * previous walk of the list in the same RCU read-side critical section, or
 * that you held some sort of non-RCU reference (such as a reference count)
 * to keep the node alive *and* in the list.
 *
 * This iterator is similar to list_for_each_entry_from_rcu() except
 * this starts after the given position and that one starts at the given
 * position.
 */
#define list_for_each_entry_continue_rcu(pos, head, member) 		\
	for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
	     &pos->member != (head);	\
	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
556 
/**
 * list_for_each_entry_from_rcu - iterate over a list from current point
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_node within the struct.
 *
 * Iterate over the tail of a list starting from a given position,
 * which must have been in the list when the RCU read lock was taken.
 * This would typically require either that you obtained the node from a
 * previous walk of the list in the same RCU read-side critical section, or
 * that you held some sort of non-RCU reference (such as a reference count)
 * to keep the node alive *and* in the list.
 *
 * This iterator is similar to list_for_each_entry_continue_rcu() except
 * this starts from the given position and that one starts from the position
 * after the given position.
 */
#define list_for_each_entry_from_rcu(pos, head, member)			\
	for (; &(pos)->member != (head);					\
		pos = list_entry_rcu(pos->member.next, typeof(*(pos)), member))
577 
/**
 * hlist_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: list_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry().
 */
static inline void hlist_del_rcu(struct hlist_node *n)
{
	__hlist_del(n);
	/* Poison ->pprev only; ->next stays valid for in-flight readers. */
	WRITE_ONCE(n->pprev, LIST_POISON2);
}
602 
/**
 * hlist_replace_rcu - replace old entry by new one
 * @old : the element to be replaced
 * @new : the new element to insert
 *
 * The @old entry will be replaced with the @new entry atomically from
 * the perspective of concurrent readers.  It is the caller's responsibility
 * to synchronize with concurrent updaters, if any.
 */
static inline void hlist_replace_rcu(struct hlist_node *old,
					struct hlist_node *new)
{
	struct hlist_node *next = old->next;

	/* Initialize @new fully before publishing it in @old's slot. */
	new->next = next;
	WRITE_ONCE(new->pprev, old->pprev);
	/* Publication: the predecessor's ->next (or head) now points at @new. */
	rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
	if (next)
		WRITE_ONCE(new->next->pprev, &new->next);
	WRITE_ONCE(old->pprev, LIST_POISON2);
}
624 
/**
 * hlists_swap_heads_rcu - swap the lists the hlist heads point to
 * @left:  The hlist head on the left
 * @right: The hlist head on the right
 *
 * The lists start out as [@left  ][node1 ... ] and
 *                        [@right ][node2 ... ]
 * The lists end up as    [@left  ][node2 ... ]
 *                        [@right ][node1 ... ]
 *
 * NOTE(review): node1 and node2 are dereferenced unconditionally below,
 * so both lists appear to require at least one element — confirm that
 * callers guarantee non-empty lists.
 */
static inline void hlists_swap_heads_rcu(struct hlist_head *left, struct hlist_head *right)
{
	struct hlist_node *node1 = left->first;
	struct hlist_node *node2 = right->first;

	rcu_assign_pointer(left->first, node2);
	rcu_assign_pointer(right->first, node1);
	WRITE_ONCE(node2->pprev, &left->first);
	WRITE_ONCE(node1->pprev, &right->first);
}
645 
/*
 * Return the first or the next element in an RCU protected hlist,
 * as an __rcu-annotated lvalue suitable for rcu_dereference() and
 * rcu_assign_pointer().
 */
#define hlist_first_rcu(head)	(*((struct hlist_node __rcu **)(&(head)->first)))
#define hlist_next_rcu(node)	(*((struct hlist_node __rcu **)(&(node)->next)))
#define hlist_pprev_rcu(node)	(*((struct hlist_node __rcu **)((node)->pprev)))
652 
/**
 * hlist_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_head_rcu(struct hlist_node *n,
					struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	/* Initialize @n before rcu_assign_pointer() makes it reachable. */
	n->next = first;
	WRITE_ONCE(n->pprev, &h->first);
	rcu_assign_pointer(hlist_first_rcu(h), n);
	if (first)
		WRITE_ONCE(first->pprev, &n->next);
}
683 
/**
 * hlist_add_tail_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.  Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_add_tail_rcu(struct hlist_node *n,
				      struct hlist_head *h)
{
	struct hlist_node *i, *last = NULL;

	/* Note: write side code, so rcu accessors are not needed. */
	for (i = h->first; i; i = i->next)
		last = i;

	if (last) {
		/* Append after the last node; publish via rcu_assign_pointer(). */
		n->next = last->next;
		WRITE_ONCE(n->pprev, &last->next);
		rcu_assign_pointer(hlist_next_rcu(last), n);
	} else {
		/* Empty list: tail insertion degenerates to head insertion. */
		hlist_add_head_rcu(n, h);
	}
}
720 
/**
 * hlist_add_before_rcu
 * @n: the new element to add to the hash list.
 * @next: the existing element to add the new element before.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * before the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_before_rcu(struct hlist_node *n,
					struct hlist_node *next)
{
	/* @n inherits @next's predecessor slot, then links forward to @next. */
	WRITE_ONCE(n->pprev, next->pprev);
	n->next = next;
	/*
	 * Publication: store @n through its inherited pprev, i.e. into the
	 * predecessor's ->next (or the list head's ->first).
	 */
	rcu_assign_pointer(hlist_pprev_rcu(n), n);
	WRITE_ONCE(next->pprev, &n->next);
}
747 
/**
 * hlist_add_behind_rcu
 * @n: the new element to add to the hash list.
 * @prev: the existing element to add the new element after.
 *
 * Description:
 * Adds the specified element to the specified hlist
 * after the specified node while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_add_head_rcu()
 * or hlist_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs.
 */
static inline void hlist_add_behind_rcu(struct hlist_node *n,
					struct hlist_node *prev)
{
	/* Initialize @n before rcu_assign_pointer() makes it reachable. */
	n->next = prev->next;
	WRITE_ONCE(n->pprev, &prev->next);
	rcu_assign_pointer(hlist_next_rcu(prev), n);
	if (n->next)
		WRITE_ONCE(n->next->pprev, &n->next);
}
775 
/* Iterate over raw hlist_nodes; each link is fetched with rcu_dereference(). */
#define __hlist_for_each_rcu(pos, head)				\
	for (pos = rcu_dereference(hlist_first_rcu(head));	\
	     pos;						\
	     pos = rcu_dereference(hlist_next_rcu(pos)))
780 
/**
 * hlist_for_each_entry_rcu - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 * @cond:	optional lockdep expression if called from non-RCU protection.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * The rcu_dereference_raw() calls are intentional: the protection
 * check is centralized in the leading __list_check_rcu(), which
 * covers both the RCU read lock and the optional @cond expression,
 * so per-step checking would be redundant.
 */
#define hlist_for_each_entry_rcu(pos, head, member, cond...)		\
	for (__list_check_rcu(dummy, ## cond, 0),			\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
799 
/**
 * hlist_for_each_entry_srcu - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 * @cond:	lockdep expression for the lock required to traverse the list.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by srcu_read_lock().
 * The lockdep expression srcu_read_lock_held() can be passed as the
 * cond argument from read side.
 *
 * Unlike hlist_for_each_entry_rcu(), @cond is mandatory here: the
 * check is performed once up front by __list_check_srcu(), which is
 * why the per-step dereferences use the _raw flavor.
 */
#define hlist_for_each_entry_srcu(pos, head, member, cond)		\
	for (__list_check_srcu(cond),					\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
820 
/**
 * hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 *
 * This is the same as hlist_for_each_entry_rcu() except that it does
 * not do any RCU debugging or tracing (rcu_dereference_raw_check()
 * skips the instrumentation), making it safe to use from the tracing
 * code itself.
 */
#define hlist_for_each_entry_rcu_notrace(pos, head, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
840 
/**
 * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as hlist_add_head_rcu()
 * as long as the traversal is guarded by rcu_read_lock_bh(), which
 * is what rcu_dereference_bh() checks for.
 */
#define hlist_for_each_entry_rcu_bh(pos, head, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
			typeof(*(pos)), member);			\
		pos;							\
		pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
			&(pos)->member)), typeof(*(pos)), member))
857 
/**
 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 *
 * @pos must already point at a valid, still-listed entry; iteration
 * starts at the element *after* it (the entry @pos points at is not
 * revisited).
 */
#define hlist_for_each_entry_continue_rcu(pos, member)			\
	for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
			&(pos)->member)), typeof(*(pos)), member);	\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))
869 
/**
 * hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 *
 * _bh flavor of hlist_for_each_entry_continue_rcu(): iteration starts
 * at the element after @pos and each step is checked against
 * rcu_read_lock_bh() protection via rcu_dereference_bh().
 */
#define hlist_for_each_entry_continue_rcu_bh(pos, member)		\
	for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(  \
			&(pos)->member)), typeof(*(pos)), member);	\
	     pos;							\
	     pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))
881 
/**
 * hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
 * @pos:	the type * to use as a loop cursor.
 * @member:	the name of the hlist_node within the struct.
 *
 * Unlike the _continue_ variant, the first iteration visits the
 * entry @pos currently points at (the loop has no initializer),
 * then proceeds forward through the list.
 */
#define hlist_for_each_entry_from_rcu(pos, member)			\
	for (; pos;							\
	     pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(	\
			&(pos)->member)), typeof(*(pos)), member))
891 
892 #endif	/* __KERNEL__ */
893 #endif
894