// SPDX-License-Identifier: GPL-2.0
/*
 * Support for async notification of waitid
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "cancel.h"
#include "waitid.h"
#include "../kernel/exit.h"

static void io_waitid_cb(struct io_tw_req tw_req, io_tw_token_t tw);

#define IO_WAITID_CANCEL_FLAG	BIT(31)
#define IO_WAITID_REF_MASK	GENMASK(30, 0)

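/*
 * Per-request waitid state. ->refs tracks ownership: bits 30:0 are a
 * reference count, and bit 31 (IO_WAITID_CANCEL_FLAG) marks that a
 * cancelation has been requested. The side whose increment takes the
 * count from zero owns completing the request.
 */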
struct io_waitid {
	struct file *file;
	int which;
	pid_t upid;
	int options;
	atomic_t refs;
	struct wait_queue_head *head;
	struct siginfo __user *infop;
	struct waitid_info info;
};

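/* Release the pid reference held in the wait_opts and free the async data. */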
static void io_waitid_free(struct io_kiocb *req)
{
	struct io_waitid_async *iwa = req->async_data;

	put_pid(iwa->wo.wo_pid);
	io_req_async_data_free(req);
}

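/*
 * Fill out a compat_siginfo for a compat task. Returns false if the
 * user copy faulted.
 */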
static bool io_waitid_compat_copy_si(struct io_waitid *iw, int signo)
{
	struct compat_siginfo __user *infop;
	bool ret;

	infop = (struct compat_siginfo __user *) iw->infop;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return false;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(iw->info.cause, &infop->si_code, Efault);
	unsafe_put_user(iw->info.pid, &infop->si_pid, Efault);
	unsafe_put_user(iw->info.uid, &infop->si_uid, Efault);
	unsafe_put_user(iw->info.status, &infop->si_status, Efault);
	ret = true;
done:
	user_write_access_end();
	return ret;
Efault:
	ret = false;
	goto done;
}

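/*
 * Copy the wait results to the user siginfo, if a pointer was supplied,
 * using the compat layout for compat tasks.
 */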
static bool io_waitid_copy_si(struct io_kiocb *req, int signo)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	bool ret;

	if (!iw->infop)
		return true;

	if (io_is_compat(req->ctx))
		return io_waitid_compat_copy_si(iw, signo);

	if (!user_write_access_begin(iw->infop, sizeof(*iw->infop)))
		return false;

	unsafe_put_user(signo, &iw->infop->si_signo, Efault);
	unsafe_put_user(0, &iw->infop->si_errno, Efault);
	unsafe_put_user(iw->info.cause, &iw->infop->si_code, Efault);
	unsafe_put_user(iw->info.pid, &iw->infop->si_pid, Efault);
	unsafe_put_user(iw->info.uid, &iw->infop->si_uid, Efault);
	unsafe_put_user(iw->info.status, &iw->infop->si_status, Efault);
	ret = true;
done:
	user_write_access_end();
	return ret;
Efault:
	ret = false;
	goto done;
}

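/*
 * A positive return from the wait means a child was found: report it as
 * a 0 return with SIGCHLD filled into the siginfo. Copy the results out
 * and drop the async state.
 */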
static int io_waitid_finish(struct io_kiocb *req, int ret)
{
	int signo = 0;

	if (ret > 0) {
		signo = SIGCHLD;
		ret = 0;
	}

	if (!io_waitid_copy_si(req, signo))
		ret = -EFAULT;
	io_waitid_free(req);
	return ret;
}

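/* Detach the request from the child-exit waitqueue, if still queued. */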
static void io_waitid_remove_wq(struct io_kiocb *req)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct wait_queue_head *head;

	head = READ_ONCE(iw->head);
	if (head) {
		struct io_waitid_async *iwa = req->async_data;

		iw->head = NULL;
		spin_lock_irq(&head->lock);
		list_del_init(&iwa->wo.child_wait.entry);
		spin_unlock_irq(&head->lock);
	}
}

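/*
 * Unhash the request, detach it from the waitqueue and post the final
 * result. The caller must hold a reference and the ring lock.
 */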
static void io_waitid_complete(struct io_kiocb *req, int ret)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

	/* anyone completing better be holding a reference */
	WARN_ON_ONCE(!(atomic_read(&iw->refs) & IO_WAITID_REF_MASK));

	lockdep_assert_held(&req->ctx->uring_lock);

	hlist_del_init(&req->hash_node);
	io_waitid_remove_wq(req);

	ret = io_waitid_finish(req, ret);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
}

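/*
 * Flag the request as canceled and, if we can claim ownership, complete
 * it with -ECANCELED. Returns true if this call did the completion.
 */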
static bool __io_waitid_cancel(struct io_kiocb *req)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

	lockdep_assert_held(&req->ctx->uring_lock);

	/*
	 * Mark us canceled regardless of ownership. This will prevent a
	 * potential retry from a spurious wakeup.
	 */
	atomic_or(IO_WAITID_CANCEL_FLAG, &iw->refs);

	/* claim ownership */
	if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
		return false;

	io_waitid_complete(req, -ECANCELED);
	io_req_queue_tw_complete(req, -ECANCELED);
	return true;
}

int io_waitid_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		     unsigned int issue_flags)
{
	return io_cancel_remove(ctx, cd, issue_flags, &ctx->waitid_list, __io_waitid_cancel);
}

bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
			  bool cancel_all)
{
	return io_cancel_remove_all(ctx, tctx, &ctx->waitid_list, cancel_all, __io_waitid_cancel);
}

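/*
 * Drop the reference the issue path holds while arming the wait.
 * Returns false if nobody else grabbed a reference in the meantime,
 * true if a wakeup raced with us and completion was punted to task_work.
 */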
static inline bool io_waitid_drop_issue_ref(struct io_kiocb *req)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

	if (!atomic_sub_return(1, &iw->refs))
		return false;

	io_waitid_remove_wq(req);

	/*
	 * Wakeup triggered, racing with us. It was prevented from
	 * completing because of that, queue up the tw to do that.
	 */
	req->io_task_work.func = io_waitid_cb;
	io_req_task_work_add(req);
	return true;
}

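/*
 * Task_work callback: perform the wait (retrying once after a spurious
 * wakeup, unless canceled) and complete the request.
 */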
static void io_waitid_cb(struct io_tw_req tw_req, io_tw_token_t tw)
{
	struct io_kiocb *req = tw_req.req;
	struct io_waitid_async *iwa = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	io_tw_lock(ctx, tw);

	ret = __do_wait(&iwa->wo);

	/*
	 * If we get -ERESTARTSYS here, we need to re-arm and check again
	 * to ensure we get another callback. If the retry works, then we can
	 * just remove ourselves from the waitqueue again and finish the
	 * request.
	 */
	if (unlikely(ret == -ERESTARTSYS)) {
		struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

		/* Don't retry if cancel found it meanwhile */
		ret = -ECANCELED;
		if (!(atomic_read(&iw->refs) & IO_WAITID_CANCEL_FLAG)) {
			iw->head = &current->signal->wait_chldexit;
			add_wait_queue(iw->head, &iwa->wo.child_wait);
			ret = __do_wait(&iwa->wo);
			if (ret == -ERESTARTSYS) {
				/* retry armed, drop our ref */
				io_waitid_drop_issue_ref(req);
				return;
			}
			/* fall through to complete, will kill waitqueue */
		}
	}

	io_waitid_complete(req, ret);
	io_req_task_complete(tw_req, tw);
}

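/*
 * Waitqueue callback, invoked when a child changes state. Detach from
 * the queue and, if nobody else owns the request, punt completion to
 * task_work.
 */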
static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
			  int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts, child_wait);
	struct io_waitid_async *iwa = container_of(wo, struct io_waitid_async, wo);
	struct io_kiocb *req = iwa->req;
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct task_struct *p = key;

	if (!pid_child_should_wake(wo, p))
		return 0;

	list_del_init(&wait->entry);
	iw->head = NULL;

	/* issue path or cancel already owns the request, let them complete it */
	if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
		return 1;

	req->io_task_work.func = io_waitid_cb;
	io_req_task_work_add(req);
	return 1;
}

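/*
 * Prep: pull the waitid arguments out of the SQE and allocate the async
 * wait state. The SQE mapping is: len -> idtype, fd -> id,
 * file_index -> options, addr2 -> user siginfo pointer.
 */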
int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa;

	if (sqe->addr || sqe->buf_index || sqe->addr3 || sqe->waitid_flags)
		return -EINVAL;

	iwa = io_uring_alloc_async_data(NULL, req);
	if (unlikely(!iwa))
		return -ENOMEM;
	iwa->req = req;

	iw->which = READ_ONCE(sqe->len);
	iw->upid = READ_ONCE(sqe->fd);
	iw->options = READ_ONCE(sqe->file_index);
	iw->head = NULL;
	iw->infop = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	return 0;
}

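/*
 * Issue path: arm the wait on current's wait_chldexit waitqueue and try
 * it once. If no child is ready yet, leave the request armed; the
 * waitqueue callback will complete it later via task_work.
 *
 * For reference, a userspace submission looks roughly like the sketch
 * below (illustrative only; assumes liburing's io_uring_prep_waitid()
 * helper, and is not part of this file's build):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	siginfo_t si;
 *
 *	io_uring_prep_waitid(sqe, P_PID, pid, &si, WEXITED, 0);
 *	io_uring_submit(&ring);
 */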
int io_waitid(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	ret = kernel_waitid_prepare(&iwa->wo, iw->which, iw->upid, &iw->info,
				    iw->options, NULL);
	if (ret)
		goto done;

	/*
	 * Mark the request as busy upfront, in case we're racing with the
	 * wakeup. If we are, then we'll notice when we drop this initial
	 * reference again after arming.
	 */
	atomic_set(&iw->refs, 1);

	/*
	 * Cancel must hold the ctx lock, so there's no risk of cancelation
	 * finding us until a) we're on the list, and b) the lock is dropped.
	 * We only need to worry about racing with the wakeup callback.
	 */
	io_ring_submit_lock(ctx, issue_flags);

	/*
	 * iw->head is valid under the ring lock, and as long as the request
	 * is on the waitid_list where cancelations may find it.
	 */
	iw->head = &current->signal->wait_chldexit;
	hlist_add_head(&req->hash_node, &ctx->waitid_list);

	init_waitqueue_func_entry(&iwa->wo.child_wait, io_waitid_wait);
	iwa->wo.child_wait.private = req->tctx->task;
	add_wait_queue(iw->head, &iwa->wo.child_wait);

	ret = __do_wait(&iwa->wo);
	if (ret == -ERESTARTSYS) {
		/*
		 * Nobody else grabbed a reference, it'll complete when we get
		 * a waitqueue callback, or if someone cancels it.
		 */
		if (!io_waitid_drop_issue_ref(req)) {
			io_ring_submit_unlock(ctx, issue_flags);
			return IOU_ISSUE_SKIP_COMPLETE;
		}

		/*
		 * Wakeup triggered, racing with us. It was prevented from
		 * completing because of that, queue up the tw to do that.
		 */
		io_ring_submit_unlock(ctx, issue_flags);
		return IOU_ISSUE_SKIP_COMPLETE;
	}

	hlist_del_init(&req->hash_node);
	io_waitid_remove_wq(req);
	ret = io_waitid_finish(req, ret);

	io_ring_submit_unlock(ctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}