// SPDX-License-Identifier: GPL-2.0
/*
 * Support for async notification of waitid
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "cancel.h"
#include "waitid.h"
#include "../kernel/exit.h"

static void io_waitid_cb(struct io_kiocb *req, struct io_tw_state *ts);

#define IO_WAITID_CANCEL_FLAG	BIT(31)
#define IO_WAITID_REF_MASK	GENMASK(30, 0)

struct io_waitid {
	struct file *file;
	int which;
	pid_t upid;
	int options;
	atomic_t refs;
	struct wait_queue_head *head;
	struct siginfo __user *infop;
	struct waitid_info info;
};

static void io_waitid_free(struct io_kiocb *req)
{
	struct io_waitid_async *iwa = req->async_data;

	put_pid(iwa->wo.wo_pid);
	kfree(req->async_data);
	req->async_data = NULL;
	req->flags &= ~REQ_F_ASYNC_DATA;
}

#ifdef CONFIG_COMPAT
static bool io_waitid_compat_copy_si(struct io_waitid *iw, int signo)
{
	struct compat_siginfo __user *infop;
	bool ret;

	infop = (struct compat_siginfo __user *) iw->infop;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return false;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(iw->info.cause, &infop->si_code, Efault);
	unsafe_put_user(iw->info.pid, &infop->si_pid, Efault);
	unsafe_put_user(iw->info.uid, &infop->si_uid, Efault);
	unsafe_put_user(iw->info.status, &infop->si_status, Efault);
	ret = true;
done:
	user_write_access_end();
	return ret;
Efault:
	ret = false;
	goto done;
}
#endif

static bool io_waitid_copy_si(struct io_kiocb *req, int signo)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	bool ret;

	if (!iw->infop)
		return true;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return io_waitid_compat_copy_si(iw, signo);
#endif

	if (!user_write_access_begin(iw->infop, sizeof(*iw->infop)))
		return false;

	unsafe_put_user(signo, &iw->infop->si_signo, Efault);
	unsafe_put_user(0, &iw->infop->si_errno, Efault);
	unsafe_put_user(iw->info.cause, &iw->infop->si_code, Efault);
	unsafe_put_user(iw->info.pid, &iw->infop->si_pid, Efault);
	unsafe_put_user(iw->info.uid, &iw->infop->si_uid, Efault);
	unsafe_put_user(iw->info.status, &iw->infop->si_status, Efault);
	ret = true;
done:
	user_write_access_end();
	return ret;
Efault:
	ret = false;
	goto done;
}

static int io_waitid_finish(struct io_kiocb *req, int ret)
{
	int signo = 0;

	if (ret > 0) {
		signo = SIGCHLD;
		ret = 0;
	}

	if (!io_waitid_copy_si(req, signo))
		ret = -EFAULT;
	io_waitid_free(req);
	return ret;
}

static void io_waitid_complete(struct io_kiocb *req, int ret)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

	/* anyone completing better be holding a reference */
	WARN_ON_ONCE(!(atomic_read(&iw->refs) & IO_WAITID_REF_MASK));

	lockdep_assert_held(&req->ctx->uring_lock);

	hlist_del_init(&req->hash_node);

	ret = io_waitid_finish(req, ret);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
}
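/*
 * Ownership model: the low 31 bits of iw->refs (IO_WAITID_REF_MASK)
 * count references to the request, with IO_WAITID_CANCEL_FLAG (bit 31)
 * marking it canceled. The issue path holds an initial reference while
 * arming the waitqueue; wakeup and cancel each claim ownership with
 * atomic_fetch_inc() and back off if the previous count shows that
 * somebody else already owns completion.
 */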
static bool __io_waitid_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa = req->async_data;

	/*
	 * Mark us canceled regardless of ownership. This will prevent a
	 * potential retry from a spurious wakeup.
	 */
	atomic_or(IO_WAITID_CANCEL_FLAG, &iw->refs);

	/* claim ownership */
	if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
		return false;

	spin_lock_irq(&iw->head->lock);
	list_del_init(&iwa->wo.child_wait.entry);
	spin_unlock_irq(&iw->head->lock);

	io_waitid_complete(req, -ECANCELED);
	io_req_queue_tw_complete(req, -ECANCELED);
	return true;
}

int io_waitid_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		     unsigned int issue_flags)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	int nr = 0;

	if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_FD_FIXED))
		return -ENOENT;

	io_ring_submit_lock(ctx, issue_flags);
	hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) {
		if (req->cqe.user_data != cd->data &&
		    !(cd->flags & IORING_ASYNC_CANCEL_ANY))
			continue;
		if (__io_waitid_cancel(ctx, req))
			nr++;
		if (!(cd->flags & IORING_ASYNC_CANCEL_ALL))
			break;
	}
	io_ring_submit_unlock(ctx, issue_flags);

	if (nr)
		return nr;

	return -ENOENT;
}

bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
			  bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;

	lockdep_assert_held(&ctx->uring_lock);

	hlist_for_each_entry_safe(req, tmp, &ctx->waitid_list, hash_node) {
		if (!io_match_task_safe(req, tctx, cancel_all))
			continue;
		hlist_del_init(&req->hash_node);
		__io_waitid_cancel(ctx, req);
		found = true;
	}

	return found;
}

static inline bool io_waitid_drop_issue_ref(struct io_kiocb *req)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa = req->async_data;

	if (!atomic_sub_return(1, &iw->refs))
		return false;

	/*
	 * Wakeup triggered, racing with us. It was prevented from
	 * completing because of that, queue up the tw to do that.
	 */
	req->io_task_work.func = io_waitid_cb;
	io_req_task_work_add(req);
	remove_wait_queue(iw->head, &iwa->wo.child_wait);
	return true;
}
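/*
 * Task work callback, queued by the waitqueue wakeup (or by losing the
 * race in io_waitid_drop_issue_ref() above). It runs in the context of
 * the submitting task, which is what lets it retry __do_wait() and copy
 * the siginfo back to userspace on completion.
 */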
static void io_waitid_cb(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_waitid_async *iwa = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	io_tw_lock(ctx, ts);

	ret = __do_wait(&iwa->wo);

	/*
	 * If we get -ERESTARTSYS here, we need to re-arm and check again
	 * to ensure we get another callback. If the retry works, then we can
	 * just remove ourselves from the waitqueue again and finish the
	 * request.
	 */
	if (unlikely(ret == -ERESTARTSYS)) {
		struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

		/* Don't retry if cancel found it meanwhile */
		ret = -ECANCELED;
		if (!(atomic_read(&iw->refs) & IO_WAITID_CANCEL_FLAG)) {
			iw->head = &current->signal->wait_chldexit;
			add_wait_queue(iw->head, &iwa->wo.child_wait);
			ret = __do_wait(&iwa->wo);
			if (ret == -ERESTARTSYS) {
				/* retry armed, drop our ref */
				io_waitid_drop_issue_ref(req);
				return;
			}

			remove_wait_queue(iw->head, &iwa->wo.child_wait);
		}
	}

	io_waitid_complete(req, ret);
	io_req_task_complete(req, ts);
}

static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
			  int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts, child_wait);
	struct io_waitid_async *iwa = container_of(wo, struct io_waitid_async, wo);
	struct io_kiocb *req = iwa->req;
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct task_struct *p = key;

	if (!pid_child_should_wake(wo, p))
		return 0;

	/* cancel is in progress */
	if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
		return 1;

	req->io_task_work.func = io_waitid_cb;
	io_req_task_work_add(req);
	list_del_init(&wait->entry);
	return 1;
}

int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa;

	if (sqe->addr || sqe->buf_index || sqe->addr3 || sqe->waitid_flags)
		return -EINVAL;

	iwa = io_uring_alloc_async_data(NULL, req);
	if (unlikely(!iwa))
		return -ENOMEM;
	iwa->req = req;

	iw->which = READ_ONCE(sqe->len);
	iw->upid = READ_ONCE(sqe->fd);
	iw->options = READ_ONCE(sqe->file_index);
	iw->infop = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	return 0;
}
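/*
 * Issue side: try a non-blocking __do_wait() first. If the target has
 * already changed state the request completes inline; on -ERESTARTSYS
 * it stays armed on the signal's wait_chldexit waitqueue and is
 * finished later from io_waitid_cb() task work.
 */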
int io_waitid(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	ret = kernel_waitid_prepare(&iwa->wo, iw->which, iw->upid, &iw->info,
				    iw->options, NULL);
	if (ret)
		goto done;

	/*
	 * Mark the request as busy upfront, in case we're racing with the
	 * wakeup. If we are, then we'll notice when we drop this initial
	 * reference again after arming.
	 */
	atomic_set(&iw->refs, 1);

	/*
	 * Cancel must hold the ctx lock, so there's no risk of cancelation
	 * finding us until a) we remain on the list, and b) the lock is
	 * dropped. We only need to worry about racing with the wakeup
	 * callback.
	 */
	io_ring_submit_lock(ctx, issue_flags);
	hlist_add_head(&req->hash_node, &ctx->waitid_list);

	init_waitqueue_func_entry(&iwa->wo.child_wait, io_waitid_wait);
	iwa->wo.child_wait.private = req->tctx->task;
	iw->head = &current->signal->wait_chldexit;
	add_wait_queue(iw->head, &iwa->wo.child_wait);

	ret = __do_wait(&iwa->wo);
	if (ret == -ERESTARTSYS) {
		/*
		 * Nobody else grabbed a reference, it'll complete when we get
		 * a waitqueue callback, or if someone cancels it.
		 */
		if (!io_waitid_drop_issue_ref(req)) {
			io_ring_submit_unlock(ctx, issue_flags);
			return IOU_ISSUE_SKIP_COMPLETE;
		}

		/*
		 * Wakeup triggered, racing with us. It was prevented from
		 * completing because of that, queue up the tw to do that.
		 */
		io_ring_submit_unlock(ctx, issue_flags);
		return IOU_ISSUE_SKIP_COMPLETE;
	}

	hlist_del_init(&req->hash_node);
	remove_wait_queue(iw->head, &iwa->wo.child_wait);
	ret = io_waitid_finish(req, ret);

	io_ring_submit_unlock(ctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
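/*
 * Userspace usage sketch (illustration only; assumes liburing's
 * io_uring_prep_waitid() helper, which mirrors the SQE layout parsed in
 * io_waitid_prep() above):
 *
 *	siginfo_t si;
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_waitid(sqe, P_PID, child_pid, &si, WEXITED, 0);
 *	io_uring_submit(&ring);
 *
 * On success the CQE res is 0 and si is filled in; otherwise res is a
 * negative errno (e.g. -ECANCELED if the request was canceled).
 */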