xref: /linux/include/linux/rcuwait.h (revision 58d4292bd037b01fbb940a5170817f7d40caa9d5)
1b2441318SGreg Kroah-Hartman /* SPDX-License-Identifier: GPL-2.0 */
28f95c90cSDavidlohr Bueso #ifndef _LINUX_RCUWAIT_H_
38f95c90cSDavidlohr Bueso #define _LINUX_RCUWAIT_H_
48f95c90cSDavidlohr Bueso 
58f95c90cSDavidlohr Bueso #include <linux/rcupdate.h>
680fbaf1cSPeter Zijlstra (Intel) #include <linux/sched/signal.h>
78f95c90cSDavidlohr Bueso 
/*
 * rcuwait provides a way of blocking and waking up a single
 * task in an rcu-safe manner.
 *
 * The only time @task is non-nil is when a user is blocked (or
 * checking whether it needs to block) on a condition; it is reset
 * as soon as we know that the condition has succeeded and the task
 * is awoken.
 */
struct rcuwait {
	struct task_struct __rcu *task;	/* blocked waiter, NULL when idle */
};
198f95c90cSDavidlohr Bueso 
/* Compile-time initializer: no task is waiting on the rcuwait. */
#define __RCUWAIT_INITIALIZER(name)		\
	{ .task = NULL, }
228f95c90cSDavidlohr Bueso 
/*
 * Runtime initializer, equivalent to __RCUWAIT_INITIALIZER().
 *
 * NOTE(review): a plain store (rather than rcu_assign_pointer()) appears
 * intentional here — presumably @w has not yet been published to any
 * concurrent waker at init time, and a constant-NULL assignment needs no
 * publication ordering anyway.
 */
static inline void rcuwait_init(struct rcuwait *w)
{
	w->task = NULL;
}
278f95c90cSDavidlohr Bueso 
28191a43beSDavidlohr Bueso /*
29191a43beSDavidlohr Bueso  * Note: this provides no serialization and, just as with waitqueues,
30191a43beSDavidlohr Bueso  * requires care to estimate as to whether or not the wait is active.
31191a43beSDavidlohr Bueso  */
32191a43beSDavidlohr Bueso static inline int rcuwait_active(struct rcuwait *w)
33191a43beSDavidlohr Bueso {
34febd668dSPaolo Bonzini 	return !!rcu_access_pointer(w->task);
35191a43beSDavidlohr Bueso }
36191a43beSDavidlohr Bueso 
379d9a6ebfSDavidlohr Bueso extern int rcuwait_wake_up(struct rcuwait *w);
388f95c90cSDavidlohr Bueso 
/*
 * The caller is responsible for locking around rcuwait_wait_event(),
 * and [prepare_to/finish]_rcuwait() such that writes to @task are
 * properly serialized.
 */

/*
 * Publish current as the waiter on @w.  rcu_assign_pointer() provides
 * the release ordering needed so that an RCU reader (rcuwait_wake_up())
 * observing the pointer sees a fully valid task to wake.
 */
static inline void prepare_to_rcuwait(struct rcuwait *w)
{
	rcu_assign_pointer(w->task, current);
}
495c21f7b3SDavidlohr Bueso 
50*58d4292bSIngo Molnar extern void finish_rcuwait(struct rcuwait *w);
515c21f7b3SDavidlohr Bueso 
/*
 * rcuwait_wait_event - sleep on @w until @condition becomes true
 * @w:         the rcuwait to block on (registered via prepare_to_rcuwait())
 * @condition: expression re-evaluated after every wakeup
 * @state:     task state to sleep in (e.g. TASK_INTERRUPTIBLE)
 *
 * Returns 0 once @condition is observed true, or -EINTR if a signal is
 * pending and @state is signal-sensitive (per signal_pending_state()).
 * The caller must serialize writes to @w->task as described above.
 */
#define rcuwait_wait_event(w, condition, state)				\
({									\
	int __ret = 0;							\
	prepare_to_rcuwait(w);						\
	for (;;) {							\
		/*							\
		 * Implicit barrier (A) pairs with (B) in		\
		 * rcuwait_wake_up().					\
		 */							\
		set_current_state(state);				\
		if (condition)						\
			break;						\
									\
		if (signal_pending_state(state, current)) {		\
			__ret = -EINTR;					\
			break;						\
		}							\
									\
		schedule();						\
	}								\
	finish_rcuwait(w);						\
	__ret;								\
})
758f95c90cSDavidlohr Bueso 
768f95c90cSDavidlohr Bueso #endif /* _LINUX_RCUWAIT_H_ */
77