// SPDX-License-Identifier: GPL-2.0
/*
 * Support for async notification of waitid
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "cancel.h"
#include "waitid.h"
#include "../kernel/exit.h"

static void io_waitid_cb(struct io_kiocb *req, io_tw_token_t tw);

#define IO_WAITID_CANCEL_FLAG	BIT(31)
#define IO_WAITID_REF_MASK	GENMASK(30, 0)

struct io_waitid {
	struct file *file;
	int which;
	pid_t upid;
	int options;
	atomic_t refs;
	struct wait_queue_head *head;
	struct siginfo __user *infop;
	struct waitid_info info;
};

static void io_waitid_free(struct io_kiocb *req)
{
	struct io_waitid_async *iwa = req->async_data;

	put_pid(iwa->wo.wo_pid);
	io_req_async_data_free(req);
}

static bool io_waitid_compat_copy_si(struct io_waitid *iw, int signo)
{
	struct compat_siginfo __user *infop;
	bool ret;

	infop = (struct compat_siginfo __user *) iw->infop;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return false;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(iw->info.cause, &infop->si_code, Efault);
	unsafe_put_user(iw->info.pid, &infop->si_pid, Efault);
	unsafe_put_user(iw->info.uid, &infop->si_uid, Efault);
	unsafe_put_user(iw->info.status, &infop->si_status, Efault);
	ret = true;
done:
	user_write_access_end();
	return ret;
Efault:
	ret = false;
	goto done;
}

static bool io_waitid_copy_si(struct io_kiocb *req, int signo)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	bool ret;

	if (!iw->infop)
		return true;

	if (io_is_compat(req->ctx))
		return io_waitid_compat_copy_si(iw, signo);

	if (!user_write_access_begin(iw->infop, sizeof(*iw->infop)))
		return false;

	unsafe_put_user(signo, &iw->infop->si_signo, Efault);
	unsafe_put_user(0, &iw->infop->si_errno, Efault);
	unsafe_put_user(iw->info.cause, &iw->infop->si_code, Efault);
	unsafe_put_user(iw->info.pid, &iw->infop->si_pid, Efault);
	unsafe_put_user(iw->info.uid, &iw->infop->si_uid, Efault);
	unsafe_put_user(iw->info.status, &iw->infop->si_status, Efault);
	ret = true;
done:
	user_write_access_end();
	return ret;
Efault:
	ret = false;
	goto done;
}

static int io_waitid_finish(struct io_kiocb *req, int ret)
{
	int signo = 0;

	if (ret > 0) {
		signo = SIGCHLD;
		ret = 0;
	}

	if (!io_waitid_copy_si(req, signo))
		ret = -EFAULT;
	io_waitid_free(req);
	return ret;
}

static void io_waitid_complete(struct io_kiocb *req, int ret)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

	/* anyone completing better be holding a reference */
	WARN_ON_ONCE(!(atomic_read(&iw->refs) & IO_WAITID_REF_MASK));

	lockdep_assert_held(&req->ctx->uring_lock);

	hlist_del_init(&req->hash_node);

	ret = io_waitid_finish(req, ret);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
}
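
/*
 * Only one of the issue path, the waitqueue wakeup and cancelation may
 * complete a request: the IO_WAITID_REF_MASK portion of iw->refs arbitrates
 * that ownership. IO_WAITID_CANCEL_FLAG is set below regardless of who owns
 * the request, so that a spurious wakeup cannot re-arm the wait after
 * cancelation.
 */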
static bool __io_waitid_cancel(struct io_kiocb *req)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa = req->async_data;

	/*
	 * Mark us canceled regardless of ownership. This will prevent a
	 * potential retry from a spurious wakeup.
	 */
	atomic_or(IO_WAITID_CANCEL_FLAG, &iw->refs);

	/* claim ownership */
	if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
		return false;

	spin_lock_irq(&iw->head->lock);
	list_del_init(&iwa->wo.child_wait.entry);
	spin_unlock_irq(&iw->head->lock);
	io_waitid_complete(req, -ECANCELED);
	io_req_queue_tw_complete(req, -ECANCELED);
	return true;
}

int io_waitid_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		     unsigned int issue_flags)
{
	return io_cancel_remove(ctx, cd, issue_flags, &ctx->waitid_list, __io_waitid_cancel);
}

bool io_waitid_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
			  bool cancel_all)
{
	return io_cancel_remove_all(ctx, tctx, &ctx->waitid_list, cancel_all, __io_waitid_cancel);
}

static inline bool io_waitid_drop_issue_ref(struct io_kiocb *req)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa = req->async_data;

	if (!atomic_sub_return(1, &iw->refs))
		return false;

	/*
	 * Wakeup triggered, racing with us. It was prevented from
	 * completing because of that, queue up the tw to do that.
	 */
	req->io_task_work.func = io_waitid_cb;
	io_req_task_work_add(req);
	remove_wait_queue(iw->head, &iwa->wo.child_wait);
	return true;
}
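
/*
 * Task work callback, queued from the child-exit waitqueue wakeup
 * (io_waitid_wait), or from io_waitid_drop_issue_ref() when a wakeup raced
 * with the issue/retry path. Takes the ring lock, re-checks the wait
 * condition, and either re-arms the waitqueue entry or completes the
 * request.
 */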
static void io_waitid_cb(struct io_kiocb *req, io_tw_token_t tw)
{
	struct io_waitid_async *iwa = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	io_tw_lock(ctx, tw);

	ret = __do_wait(&iwa->wo);

	/*
	 * If we get -ERESTARTSYS here, we need to re-arm and check again
	 * to ensure we get another callback. If the retry works, then we can
	 * just remove ourselves from the waitqueue again and finish the
	 * request.
	 */
	if (unlikely(ret == -ERESTARTSYS)) {
		struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);

		/* Don't retry if cancel found it meanwhile */
		ret = -ECANCELED;
		if (!(atomic_read(&iw->refs) & IO_WAITID_CANCEL_FLAG)) {
			iw->head = &current->signal->wait_chldexit;
			add_wait_queue(iw->head, &iwa->wo.child_wait);
			ret = __do_wait(&iwa->wo);
			if (ret == -ERESTARTSYS) {
				/* retry armed, drop our ref */
				io_waitid_drop_issue_ref(req);
				return;
			}

			remove_wait_queue(iw->head, &iwa->wo.child_wait);
		}
	}

	io_waitid_complete(req, ret);
	io_req_task_complete(req, tw);
}

static int io_waitid_wait(struct wait_queue_entry *wait, unsigned mode,
			  int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts, child_wait);
	struct io_waitid_async *iwa = container_of(wo, struct io_waitid_async, wo);
	struct io_kiocb *req = iwa->req;
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct task_struct *p = key;

	if (!pid_child_should_wake(wo, p))
		return 0;

	/* cancel is in progress */
	if (atomic_fetch_inc(&iw->refs) & IO_WAITID_REF_MASK)
		return 1;

	req->io_task_work.func = io_waitid_cb;
	io_req_task_work_add(req);
	list_del_init(&wait->entry);
	return 1;
}

int io_waitid_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa;

	if (sqe->addr || sqe->buf_index || sqe->addr3 || sqe->waitid_flags)
		return -EINVAL;

	iwa = io_uring_alloc_async_data(NULL, req);
	if (unlikely(!iwa))
		return -ENOMEM;
	iwa->req = req;

	iw->which = READ_ONCE(sqe->len);
	iw->upid = READ_ONCE(sqe->fd);
	iw->options = READ_ONCE(sqe->file_index);
	iw->infop = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	return 0;
}
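
/*
 * Illustrative sketch only, not part of this file: matching the prep routine
 * above, a userspace SQE for IORING_OP_WAITID is expected to carry roughly
 *
 *	sqe->fd		= id;		(pid/pgid to wait for, per idtype)
 *	sqe->len	= idtype;	(P_PID, P_PGID, P_PIDFD or P_ALL)
 *	sqe->file_index	= options;	(WEXITED, WSTOPPED, WCONTINUED, ...)
 *	sqe->addr2	= infop;	(optional siginfo __user pointer)
 *
 * with addr, buf_index, addr3 and waitid_flags left at zero. liburing wraps
 * this as io_uring_prep_waitid().
 */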
int io_waitid(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_waitid *iw = io_kiocb_to_cmd(req, struct io_waitid);
	struct io_waitid_async *iwa = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	ret = kernel_waitid_prepare(&iwa->wo, iw->which, iw->upid, &iw->info,
				    iw->options, NULL);
	if (ret)
		goto done;

	/*
	 * Mark the request as busy upfront, in case we're racing with the
	 * wakeup. If we are, then we'll notice when we drop this initial
	 * reference again after arming.
	 */
	atomic_set(&iw->refs, 1);

	/*
	 * Cancel must hold the ctx lock, so there's no risk of cancelation
	 * finding us until a) we remain on the list, and b) the lock is
	 * dropped. We only need to worry about racing with the wakeup
	 * callback.
	 */
	io_ring_submit_lock(ctx, issue_flags);
	hlist_add_head(&req->hash_node, &ctx->waitid_list);

	init_waitqueue_func_entry(&iwa->wo.child_wait, io_waitid_wait);
	iwa->wo.child_wait.private = req->tctx->task;
	iw->head = &current->signal->wait_chldexit;
	add_wait_queue(iw->head, &iwa->wo.child_wait);

	ret = __do_wait(&iwa->wo);
	if (ret == -ERESTARTSYS) {
		/*
		 * Nobody else grabbed a reference, it'll complete when we get
		 * a waitqueue callback, or if someone cancels it.
		 */
		if (!io_waitid_drop_issue_ref(req)) {
			io_ring_submit_unlock(ctx, issue_flags);
			return IOU_ISSUE_SKIP_COMPLETE;
		}

		/*
		 * Wakeup triggered, racing with us. It was prevented from
		 * completing because of that, queue up the tw to do that.
		 */
		io_ring_submit_unlock(ctx, issue_flags);
		return IOU_ISSUE_SKIP_COMPLETE;
	}

	hlist_del_init(&req->hash_node);
	remove_wait_queue(iw->head, &iwa->wo.child_wait);
	ret = io_waitid_finish(req, ret);

	io_ring_submit_unlock(ctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}