xref: /linux/lib/closure.c (revision fdd5ecbbff751c3b9061d8ebb08e5c96119915b4)
// SPDX-License-Identifier: GPL-2.0
/*
 * Asynchronous refcounty things
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include <linux/closure.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/sched/debug.h>

/* Sanity checks run whenever a ref is dropped */
static inline void closure_put_after_sub_checks(int flags)
{
	int r = flags & CLOSURE_REMAINING_MASK;

	if (WARN(flags & CLOSURE_GUARD_MASK,
		 "closure has guard bits set: %x (%u)",
		 flags & CLOSURE_GUARD_MASK, (unsigned) __fls(r)))
		r &= ~CLOSURE_GUARD_MASK;

	WARN(!r && (flags & ~CLOSURE_DESTRUCTOR),
	     "closure ref hit 0 with incorrect flags set: %x (%u)",
	     flags & ~CLOSURE_DESTRUCTOR, (unsigned) __fls(flags));
}

static inline void closure_put_after_sub(struct closure *cl, int flags)
{
	closure_put_after_sub_checks(flags);

	if (!(flags & CLOSURE_REMAINING_MASK)) {
		/*
		 * Pairs with the release ordering in closure_sub() and
		 * closure_put(): make sure we see all stores done by
		 * whoever dropped the last ref.
		 */
		smp_acquire__after_ctrl_dep();

		cl->closure_get_happened = false;

		if (cl->fn && !(flags & CLOSURE_DESTRUCTOR)) {
			/* Ref hit 0 with a continuation set: requeue it */
			atomic_set(&cl->remaining,
				   CLOSURE_REMAINING_INITIALIZER);
			closure_queue(cl);
		} else {
			/* Really done: run the destructor, drop parent ref */
			struct closure *parent = cl->parent;
			closure_fn *destructor = cl->fn;

			closure_debug_destroy(cl);

			if (destructor)
				destructor(&cl->work);

			if (parent)
				closure_put(parent);
		}
	}
}

/*
 * For clearing flags with the same atomic op as a put: e.g.
 * __closure_wake_up() below drops CLOSURE_WAITING and a ref together.
 */
void closure_sub(struct closure *cl, int v)
{
	closure_put_after_sub(cl, atomic_sub_return_release(v, &cl->remaining));
}
EXPORT_SYMBOL(closure_sub);

/*
 * closure_put - decrement a closure's refcount
 */
void closure_put(struct closure *cl)
{
	closure_put_after_sub(cl, atomic_dec_return_release(&cl->remaining));
}
EXPORT_SYMBOL(closure_put);
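
/*
 * Hypothetical usage sketch, not part of this file: the get()/put() pairing
 * the helpers above implement.  'io', my_submit() and my_io_endio() are
 * invented names; closure_get()/closure_put() are the real API from
 * include/linux/closure.h:
 *
 *	closure_get(&io->cl);		// ref held for the in-flight request
 *	my_submit(io);			// completion path ends with...
 *
 *	static void my_io_endio(struct my_io *io)
 *	{
 *		closure_put(&io->cl);	// may fire io->cl.fn when ref hits 0
 *	}
 */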

/*
 * closure_wake_up - wake up all closures on a wait list, without memory barrier
 */
void __closure_wake_up(struct closure_waitlist *wait_list)
{
	struct llist_node *list;
	struct closure *cl, *t;
	struct llist_node *reverse = NULL;

	list = llist_del_all(&wait_list->list);

	/* We first reverse the list to preserve FIFO ordering and fairness */
	reverse = llist_reverse_order(list);

	/* Then do the wakeups */
	llist_for_each_entry_safe(cl, t, reverse, list) {
		closure_set_waiting(cl, 0);
		closure_sub(cl, CLOSURE_WAITING + 1);
	}
}
EXPORT_SYMBOL(__closure_wake_up);

/**
 * closure_wait - add a closure to a waitlist
 * @waitlist: will own a ref on @cl, which will be released when
 * closure_wake_up() is called on @waitlist.
 * @cl: closure pointer.
 */
bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
{
	/* A closure can only be on one waitlist at a time */
	if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
		return false;

	cl->closure_get_happened = true;
	closure_set_waiting(cl, _RET_IP_);
	atomic_add(CLOSURE_WAITING + 1, &cl->remaining);
	llist_add(&cl->list, &waitlist->list);

	return true;
}
EXPORT_SYMBOL(closure_wait);
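
/*
 * Hypothetical usage sketch, not part of this file: the wait/wake idiom
 * closure_wait() and __closure_wake_up() exist for.  'dev', its fields and
 * my_retry() are invented; continue_at() and closure_wake_up() (the
 * barriered wrapper around __closure_wake_up()) come from
 * include/linux/closure.h:
 *
 *	if (!try_allocate(dev)) {
 *		closure_wait(&dev->alloc_wait, cl);	// parks cl, takes a ref
 *		continue_at(cl, my_retry, dev->wq);	// reruns when woken
 *	}
 *
 * and whoever frees up the resource calls:
 *
 *	closure_wake_up(&dev->alloc_wait);
 */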

struct closure_syncer {
	struct task_struct	*task;
	int			done;
};

static CLOSURE_CALLBACK(closure_sync_fn)
{
	struct closure *cl = container_of(ws, struct closure, work);
	struct closure_syncer *s = cl->s;
	struct task_struct *p;

	/*
	 * The waiter may return and exit as soon as it observes s->done;
	 * RCU keeps its task_struct alive across the wakeup.
	 */
	rcu_read_lock();
	p = READ_ONCE(s->task);
	s->done = 1;
	wake_up_process(p);
	rcu_read_unlock();
}

void __sched __closure_sync(struct closure *cl)
{
	struct closure_syncer s = { .task = current };

	cl->s = &s;
	continue_at(cl, closure_sync_fn, NULL);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (s.done)
			break;
		schedule();
	}

	__set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL(__closure_sync);
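
/*
 * Hypothetical usage sketch, not part of this file: closure_sync() (the
 * wrapper in include/linux/closure.h around __closure_sync() above) turns a
 * closure into a synchronous wait.  my_submit_async() is invented:
 *
 *	struct closure cl;
 *
 *	closure_init_stack(&cl);
 *	my_submit_async(dev, &cl);	// takes refs with closure_get(),
 *					// drops them with closure_put()
 *	closure_sync(&cl);		// sleeps until the last ref is put
 */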

/*
 * closure_return_sync - finish running a closure, synchronously (i.e. waiting
 * for outstanding get()s to finish) and returning once closure refcount is 0.
 *
 * Unlike closure_sync() this doesn't reinit the ref to 1; subsequent
 * closure_get_not_zero() calls will fail.
 */
void __sched closure_return_sync(struct closure *cl)
{
	struct closure_syncer s = { .task = current };

	cl->s = &s;
	set_closure_fn(cl, closure_sync_fn, NULL);

	unsigned flags = atomic_sub_return_release(1 + CLOSURE_RUNNING - CLOSURE_DESTRUCTOR,
						   &cl->remaining);

	closure_put_after_sub_checks(flags);

	if (unlikely(flags & CLOSURE_REMAINING_MASK)) {
		/* Outstanding refs: sleep until closure_sync_fn() runs */
		while (1) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (s.done)
				break;
			schedule();
		}

		__set_current_state(TASK_RUNNING);
	}

	if (cl->parent)
		closure_put(cl->parent);
}
EXPORT_SYMBOL(closure_return_sync);
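
/*
 * Hypothetical usage sketch, not part of this file: closure_return_sync()
 * suits teardown paths, since the ref stays at 0 afterwards.  'obj' is
 * invented:
 *
 *	closure_return_sync(&obj->cl);	// waits out concurrent get()s
 *	kfree(obj);			// safe: no new refs can be taken
 */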

int __sched __closure_sync_timeout(struct closure *cl, unsigned long timeout)
{
	struct closure_syncer s = { .task = current };
	int ret = 0;

	cl->s = &s;
	continue_at(cl, closure_sync_fn, NULL);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (s.done)
			break;
		if (!timeout) {
			/*
			 * Carefully undo the continue_at() - but only if it
			 * hasn't completed, i.e. the final closure_put() hasn't
			 * happened yet:
			 */
			unsigned old, new, v = atomic_read(&cl->remaining);
			do {
				old = v;
				if (!old || (old & CLOSURE_RUNNING))
					goto success;

				new = old + CLOSURE_REMAINING_INITIALIZER;
			} while ((v = atomic_cmpxchg(&cl->remaining, old, new)) != old);
			ret = -ETIME;
		}

		timeout = schedule_timeout(timeout);
	}
success:
	__set_current_state(TASK_RUNNING);
	return ret;
}
EXPORT_SYMBOL(__closure_sync_timeout);
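
/*
 * Hypothetical usage sketch, not part of this file: closure_sync_timeout()
 * (the wrapper in include/linux/closure.h around __closure_sync_timeout()
 * above) is closure_sync() with a bound on the sleep.  The names are
 * invented:
 *
 *	int ret = closure_sync_timeout(&io->cl, HZ);
 *	if (ret == -ETIME)
 *		pr_warn("io still outstanding after 1 second\n");
 */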

#ifdef CONFIG_DEBUG_CLOSURES

static LIST_HEAD(closure_list);
static DEFINE_SPINLOCK(closure_list_lock);

void closure_debug_create(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic == CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_ALIVE;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_add(&cl->all, &closure_list);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_create);

void closure_debug_destroy(struct closure *cl)
{
	unsigned long flags;

	BUG_ON(cl->magic != CLOSURE_MAGIC_ALIVE);
	cl->magic = CLOSURE_MAGIC_DEAD;

	spin_lock_irqsave(&closure_list_lock, flags);
	list_del(&cl->all);
	spin_unlock_irqrestore(&closure_list_lock, flags);
}
EXPORT_SYMBOL(closure_debug_destroy);

static int debug_show(struct seq_file *f, void *data)
{
	struct closure *cl;

	spin_lock_irq(&closure_list_lock);

	list_for_each_entry(cl, &closure_list, all) {
		int r = atomic_read(&cl->remaining);

		seq_printf(f, "%p: %pS -> %pS p %p r %i ",
			   cl, (void *) cl->ip, cl->fn, cl->parent,
			   r & CLOSURE_REMAINING_MASK);

		seq_printf(f, "%s%s\n",
			   test_bit(WORK_STRUCT_PENDING_BIT,
				    work_data_bits(&cl->work)) ? "Q" : "",
			   r & CLOSURE_RUNNING	? "R" : "");

		if (r & CLOSURE_WAITING)
			seq_printf(f, " W %pS\n",
				   (void *) cl->waiting_on);

		seq_puts(f, "\n");
	}

	spin_unlock_irq(&closure_list_lock);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(debug);
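
/*
 * Reading the "closures" debugfs file created below prints one entry per
 * live closure.  From the format strings in debug_show() above, an entry
 * looks roughly like (values invented):
 *
 *	00000000deadbeef: my_caller+0x1a0/0x6b0 -> my_next_fn p 0000000000000000 r 1 R
 */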

static int __init closure_debug_init(void)
{
	debugfs_create_file("closures", 0400, NULL, NULL, &debug_fops);
	return 0;
}
late_initcall(closure_debug_init)

#endif