// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/hashtable.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "opdef.h"
#include "kbuf.h"
#include "poll.h"
#include "cancel.h"

struct io_poll_update {
        struct file *file;
        u64 old_user_data;
        u64 new_user_data;
        __poll_t events;
        bool update_events;
        bool update_user_data;
};

struct io_poll_table {
        struct poll_table_struct pt;
        struct io_kiocb *req;
        int nr_entries;
        int error;
        bool owning;
        /* output value, set only if arm poll returns >0 */
        __poll_t result_mask;
};

#define IO_POLL_CANCEL_FLAG     BIT(31)
#define IO_POLL_RETRY_FLAG      BIT(30)
#define IO_POLL_REF_MASK        GENMASK(29, 0)

/*
 * We usually have 1-2 refs taken, 128 is more than enough and we want to
 * maximise the margin between this amount and the moment when it overflows.
 */
#define IO_POLL_REF_BIAS        128

#define IO_WQE_F_DOUBLE         1

static inline struct io_kiocb *wqe_to_req(struct wait_queue_entry *wqe)
{
        unsigned long priv = (unsigned long)wqe->private;

        return (struct io_kiocb *)(priv & ~IO_WQE_F_DOUBLE);
}

static inline bool wqe_is_double(struct wait_queue_entry *wqe)
{
        unsigned long priv = (unsigned long)wqe->private;

        return priv & IO_WQE_F_DOUBLE;
}

static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
{
        int v;

        /*
         * poll_refs are already elevated and we don't have much hope for
         * grabbing the ownership. Instead of incrementing, set a retry flag
         * to notify the loop that there might have been some change.
         */
        v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
        if (v & IO_POLL_REF_MASK)
                return false;
        return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}
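/*
 * For orientation, the ->poll_refs layout implied by the masks above
 * (an illustrative summary, not a separate ABI):
 *
 *      bit  31         IO_POLL_CANCEL_FLAG - request is being cancelled
 *      bit  30         IO_POLL_RETRY_FLAG  - a waker lost the race, the
 *                                            owner should re-check events
 *      bits 29-0       IO_POLL_REF_MASK    - ownership refcount; 0 means
 *                                            the request is free to grab
 */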
/*
 * If the refs part of ->poll_refs (see IO_POLL_REF_MASK) is 0, it's free. We
 * can bump it and acquire ownership. Modifying a request while not owning it
 * is disallowed; that prevents races when enqueueing task_work and between
 * arming poll and wakeups.
 */
static inline bool io_poll_get_ownership(struct io_kiocb *req)
{
        if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
                return io_poll_get_ownership_slowpath(req);
        return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
}

static void io_poll_mark_cancelled(struct io_kiocb *req)
{
        atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
}

static struct io_poll *io_poll_get_double(struct io_kiocb *req)
{
        /* pure poll stashes this in ->async_data, poll driven retry elsewhere */
        if (req->opcode == IORING_OP_POLL_ADD)
                return req->async_data;
        return req->apoll->double_poll;
}

static struct io_poll *io_poll_get_single(struct io_kiocb *req)
{
        if (req->opcode == IORING_OP_POLL_ADD)
                return io_kiocb_to_cmd(req, struct io_poll);
        return &req->apoll->poll;
}

static void io_poll_req_insert(struct io_kiocb *req)
{
        struct io_hash_table *table = &req->ctx->cancel_table;
        u32 index = hash_long(req->cqe.user_data, table->hash_bits);
        struct io_hash_bucket *hb = &table->hbs[index];

        spin_lock(&hb->lock);
        hlist_add_head(&req->hash_node, &hb->list);
        spin_unlock(&hb->lock);
}

static void io_poll_req_delete(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
        struct io_hash_table *table = &req->ctx->cancel_table;
        u32 index = hash_long(req->cqe.user_data, table->hash_bits);
        spinlock_t *lock = &table->hbs[index].lock;

        spin_lock(lock);
        hash_del(&req->hash_node);
        spin_unlock(lock);
}

static void io_poll_req_insert_locked(struct io_kiocb *req)
{
        struct io_hash_table *table = &req->ctx->cancel_table_locked;
        u32 index = hash_long(req->cqe.user_data, table->hash_bits);

        lockdep_assert_held(&req->ctx->uring_lock);

        hlist_add_head(&req->hash_node, &table->hbs[index].list);
}

static void io_poll_tw_hash_eject(struct io_kiocb *req, bool *locked)
{
        struct io_ring_ctx *ctx = req->ctx;

        if (req->flags & REQ_F_HASH_LOCKED) {
                /*
                 * ->cancel_table_locked is protected by ->uring_lock in
                 * contrast to per bucket spinlocks. Likely, tctx_task_work()
                 * already grabbed the mutex for us, but there is a chance it
                 * failed.
                 */
                io_tw_lock(ctx, locked);
                hash_del(&req->hash_node);
                req->flags &= ~REQ_F_HASH_LOCKED;
        } else {
                io_poll_req_delete(req, ctx);
        }
}

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events,
                              wait_queue_func_t wake_func)
{
        poll->head = NULL;
#define IO_POLL_UNMASK  (EPOLLERR|EPOLLHUP|EPOLLNVAL|EPOLLRDHUP)
        /* mask in events that we always want/need */
        poll->events = events | IO_POLL_UNMASK;
        INIT_LIST_HEAD(&poll->wait.entry);
        init_waitqueue_func_entry(&poll->wait, wake_func);
}

static inline void io_poll_remove_entry(struct io_poll *poll)
{
        struct wait_queue_head *head = smp_load_acquire(&poll->head);

        if (head) {
                spin_lock_irq(&head->lock);
                list_del_init(&poll->wait.entry);
                poll->head = NULL;
                spin_unlock_irq(&head->lock);
        }
}
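/*
 * Ordering note (restating the existing scheme): the smp_load_acquire() of
 * ->head above pairs with the smp_store_release() in io_pollfree_wake()
 * below, so observing a non-NULL head makes it safe to take its lock,
 * subject to the RCU lifetime rules spelled out in io_poll_remove_entries().
 */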
static void io_poll_remove_entries(struct io_kiocb *req)
{
        /*
         * Nothing to do if neither of those flags is set. Avoid dipping
         * into the poll/apoll/double cachelines if we can.
         */
        if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
                return;

        /*
         * While we hold the waitqueue lock and the waitqueue is nonempty,
         * wake_up_pollfree() will wait for us. However, taking the waitqueue
         * lock in the first place can race with the waitqueue being freed.
         *
         * We solve this as eventpoll does: by taking advantage of the fact that
         * all users of wake_up_pollfree() will RCU-delay the actual free. If
         * we enter rcu_read_lock() and see that the pointer to the queue is
         * non-NULL, we can then lock it without the memory being freed out from
         * under us.
         *
         * Keep holding rcu_read_lock() as long as we hold the queue lock, in
         * case the caller deletes the entry from the queue, leaving it empty.
         * In that case, only RCU prevents the queue memory from being freed.
         */
        rcu_read_lock();
        if (req->flags & REQ_F_SINGLE_POLL)
                io_poll_remove_entry(io_poll_get_single(req));
        if (req->flags & REQ_F_DOUBLE_POLL)
                io_poll_remove_entry(io_poll_get_double(req));
        rcu_read_unlock();
}

enum {
        IOU_POLL_DONE = 0,
        IOU_POLL_NO_ACTION = 1,
        IOU_POLL_REMOVE_POLL_USE_RES = 2,
};
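/*
 * A sketch of how callers are expected to dispatch on these codes, based on
 * io_poll_task_func() below (illustrative, not a second implementation):
 *
 *      ret = io_poll_check_events(req, locked);
 *      if (ret == IOU_POLL_NO_ACTION)
 *              return;                 // spurious wakeup, keep waiting
 *      // IOU_POLL_DONE: poll mask is in req->cqe.res, mangle and complete
 *      // IOU_POLL_REMOVE_POLL_USE_RES: req->cqe already holds the result
 *      // ret < 0: error, fail the request
 *      io_poll_remove_entries(req);    // then eject from hash and complete
 */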
/*
 * All poll tw should go through this. Checks for poll events, manages
 * references, does rewait, etc.
 *
 * Returns a negative error on failure. IOU_POLL_NO_ACTION when no action
 * is required, which happens on a spurious wakeup or when a multishot CQE
 * has been served. IOU_POLL_DONE when it's done with the request, then the
 * mask is stored in req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to
 * remove the multishot poll and that the result is stored in req->cqe.
 */
static int io_poll_check_events(struct io_kiocb *req, bool *locked)
{
        struct io_ring_ctx *ctx = req->ctx;
        int v, ret;

        /* req->task == current here, checking PF_EXITING is safe */
        if (unlikely(req->task->flags & PF_EXITING))
                return -ECANCELED;

        do {
                v = atomic_read(&req->poll_refs);

                /* tw handler should be the owner, and so have some references */
                if (WARN_ON_ONCE(!(v & IO_POLL_REF_MASK)))
                        return IOU_POLL_DONE;
                if (v & IO_POLL_CANCEL_FLAG)
                        return -ECANCELED;
                /*
                 * cqe.res contains only events of the first wake up
                 * and all others are lost. Redo vfs_poll() to get
                 * up to date state.
                 */
                if ((v & IO_POLL_REF_MASK) != 1)
                        req->cqe.res = 0;
                if (v & IO_POLL_RETRY_FLAG) {
                        req->cqe.res = 0;
                        /*
                         * We won't find new events that came in between
                         * vfs_poll and the ref put unless we clear the flag
                         * in advance.
                         */
                        atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
                        v &= ~IO_POLL_RETRY_FLAG;
                }

                /* the mask was stashed in __io_poll_execute */
                if (!req->cqe.res) {
                        struct poll_table_struct pt = { ._key = req->apoll_events };
                        req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
                }

                if (unlikely(!req->cqe.res))
                        continue;
                if (req->apoll_events & EPOLLONESHOT)
                        return IOU_POLL_DONE;
                if (io_is_uring_fops(req->file))
                        return IOU_POLL_DONE;

                /* multishot, just fill a CQE and proceed */
                if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
                        __poll_t mask = mangle_poll(req->cqe.res &
                                                    req->apoll_events);

                        if (!io_post_aux_cqe(ctx, req->cqe.user_data,
                                             mask, IORING_CQE_F_MORE, false)) {
                                io_req_set_res(req, mask, 0);
                                return IOU_POLL_REMOVE_POLL_USE_RES;
                        }
                } else {
                        ret = io_poll_issue(req, locked);
                        if (ret == IOU_STOP_MULTISHOT)
                                return IOU_POLL_REMOVE_POLL_USE_RES;
                        if (ret < 0)
                                return ret;
                }

                /* force the next iteration to vfs_poll() */
                req->cqe.res = 0;

                /*
                 * Release all references, retry if someone tried to restart
                 * task_work while we were executing it.
                 */
        } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) &
                 IO_POLL_REF_MASK);

        return IOU_POLL_NO_ACTION;
}

static void io_poll_task_func(struct io_kiocb *req, bool *locked)
{
        int ret;

        ret = io_poll_check_events(req, locked);
        if (ret == IOU_POLL_NO_ACTION)
                return;

        if (ret == IOU_POLL_DONE) {
                struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
                req->cqe.res = mangle_poll(req->cqe.res & poll->events);
        } else if (ret != IOU_POLL_REMOVE_POLL_USE_RES) {
                req->cqe.res = ret;
                req_set_fail(req);
        }

        io_poll_remove_entries(req);
        io_poll_tw_hash_eject(req, locked);

        io_req_set_res(req, req->cqe.res, 0);
        io_req_task_complete(req, locked);
}

static void io_apoll_task_func(struct io_kiocb *req, bool *locked)
{
        int ret;

        ret = io_poll_check_events(req, locked);
        if (ret == IOU_POLL_NO_ACTION)
                return;

        io_poll_remove_entries(req);
        io_poll_tw_hash_eject(req, locked);

        if (ret == IOU_POLL_REMOVE_POLL_USE_RES)
                io_req_complete_post(req);
        else if (ret == IOU_POLL_DONE)
                io_req_task_submit(req, locked);
        else
                io_req_complete_failed(req, ret);
}
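/*
 * To summarise the two handlers above: for a plain IORING_OP_POLL_ADD the
 * (mangled) poll mask is the completion result itself, while for an apoll
 * wakeup the original request is re-issued via io_req_task_submit(), so
 * the eventual CQE belongs to the opcode that was armed on poll.
 */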
static void __io_poll_execute(struct io_kiocb *req, int mask)
{
        io_req_set_res(req, mask, 0);
        /*
         * This is useful for poll that is armed on behalf of another
         * request, and where the wakeup path could be on a different
         * CPU. We want to avoid pulling in req->apoll->events for that
         * case.
         */
        if (req->opcode == IORING_OP_POLL_ADD)
                req->io_task_work.func = io_poll_task_func;
        else
                req->io_task_work.func = io_apoll_task_func;

        trace_io_uring_task_add(req, mask);
        io_req_task_work_add(req);
}

static inline void io_poll_execute(struct io_kiocb *req, int res)
{
        if (io_poll_get_ownership(req))
                __io_poll_execute(req, res);
}

static void io_poll_cancel_req(struct io_kiocb *req)
{
        io_poll_mark_cancelled(req);
        /* kick tw, which should complete the request */
        io_poll_execute(req, 0);
}

#define IO_ASYNC_POLL_COMMON    (EPOLLONESHOT | EPOLLPRI)

static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
        io_poll_mark_cancelled(req);
        /* we have to kick tw in case it's not already */
        io_poll_execute(req, 0);

        /*
         * If the waitqueue is being freed early but someone already holds
         * ownership over it, we have to tear down the request as best we
         * can. That means immediately removing the request from its
         * waitqueue and preventing all further accesses to the waitqueue
         * via the request.
         */
        list_del_init(&poll->wait.entry);

        /*
         * Careful: this *must* be the last step, since as soon
         * as req->head is NULL'ed out, the request can be
         * completed and freed, since aio_poll_complete_work()
         * will no longer need to take the waitqueue lock.
         */
        smp_store_release(&poll->head, NULL);
        return 1;
}

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
                        void *key)
{
        struct io_kiocb *req = wqe_to_req(wait);
        struct io_poll *poll = container_of(wait, struct io_poll, wait);
        __poll_t mask = key_to_poll(key);

        if (unlikely(mask & POLLFREE))
                return io_pollfree_wake(req, poll);

        /* for instances that support it, check for an event match first */
        if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
                return 0;

        if (io_poll_get_ownership(req)) {
                /* optional, saves extra locking for removal in tw handler */
                if (mask && poll->events & EPOLLONESHOT) {
                        list_del_init(&poll->wait.entry);
                        poll->head = NULL;
                        if (wqe_is_double(wait))
                                req->flags &= ~REQ_F_DOUBLE_POLL;
                        else
                                req->flags &= ~REQ_F_SINGLE_POLL;
                }
                __io_poll_execute(req, mask);
        }
        return 1;
}
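/*
 * A concrete example of the event match above: a poll armed for
 * EPOLLIN|EPOLLONESHOT ignores a wakeup whose key carries only EPOLLOUT,
 * since EPOLLONESHOT (part of IO_ASYNC_POLL_COMMON) is excluded from the
 * comparison. A wakeup with an empty key (mask == 0) is never filtered and
 * falls through to the ownership attempt.
 */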
/* fails only when polling is already being completed by the first entry */
static bool io_poll_double_prepare(struct io_kiocb *req)
{
        struct wait_queue_head *head;
        struct io_poll *poll = io_poll_get_single(req);

        /* head is RCU protected, see io_poll_remove_entries() comments */
        rcu_read_lock();
        head = smp_load_acquire(&poll->head);
        /*
         * poll arm might not hold ownership and so can race for req->flags
         * with io_poll_wake(). There is only one poll entry queued,
         * serialise with it by taking its head lock. As we're still arming,
         * the tw handler is not going to run, so there are no races with it.
         */
        if (head) {
                spin_lock_irq(&head->lock);
                req->flags |= REQ_F_DOUBLE_POLL;
                if (req->opcode == IORING_OP_POLL_ADD)
                        req->flags |= REQ_F_ASYNC_DATA;
                spin_unlock_irq(&head->lock);
        }
        rcu_read_unlock();
        return !!head;
}

static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
                            struct wait_queue_head *head,
                            struct io_poll **poll_ptr)
{
        struct io_kiocb *req = pt->req;
        unsigned long wqe_private = (unsigned long) req;

        /*
         * The file being polled uses multiple waitqueues for poll handling
         * (e.g. one for read, one for write). Set up a separate io_poll
         * if this happens.
         */
        if (unlikely(pt->nr_entries)) {
                struct io_poll *first = poll;

                /* double add on the same waitqueue head, ignore */
                if (first->head == head)
                        return;
                /* already have a 2nd entry, fail a third attempt */
                if (*poll_ptr) {
                        if ((*poll_ptr)->head == head)
                                return;
                        pt->error = -EINVAL;
                        return;
                }

                poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
                if (!poll) {
                        pt->error = -ENOMEM;
                        return;
                }

                /* mark as double wq entry */
                wqe_private |= IO_WQE_F_DOUBLE;
                io_init_poll_iocb(poll, first->events, first->wait.func);
                if (!io_poll_double_prepare(req)) {
                        /* the request is completing, just back off */
                        kfree(poll);
                        return;
                }
                *poll_ptr = poll;
        } else {
                /* fine to modify, there is no poll queued to race with us */
                req->flags |= REQ_F_SINGLE_POLL;
        }

        pt->nr_entries++;
        poll->head = head;
        poll->wait.private = (void *) wqe_private;

        if (poll->events & EPOLLEXCLUSIVE)
                add_wait_queue_exclusive(head, &poll->wait);
        else
                add_wait_queue(head, &poll->wait);
}

static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
                               struct poll_table_struct *p)
{
        struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
        struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);

        __io_queue_proc(poll, pt, head,
                        (struct io_poll **) &pt->req->async_data);
}

static bool io_poll_can_finish_inline(struct io_kiocb *req,
                                      struct io_poll_table *pt)
{
        return pt->owning || io_poll_get_ownership(req);
}
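/*
 * For context: __io_queue_proc() runs once per waitqueue head the file's
 * ->poll() registers. Most files register a single head, but some (e.g.
 * drivers with separate read and write queues) register two, in which case
 * the second call allocates the double entry above. The IO_WQE_F_DOUBLE
 * bit stashed in ->private lets io_poll_wake() tell the two entries apart.
 */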
/*
 * Returns 0 when it's handed over for polling. The caller owns the request if
 * it returns non-zero, but otherwise should not touch it. Negative values
 * contain an error code. When the result is >0, the polling has completed
 * inline and ipt.result_mask is set to the mask.
 */
static int __io_arm_poll_handler(struct io_kiocb *req,
                                 struct io_poll *poll,
                                 struct io_poll_table *ipt, __poll_t mask,
                                 unsigned issue_flags)
{
        struct io_ring_ctx *ctx = req->ctx;

        INIT_HLIST_NODE(&req->hash_node);
        req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
        io_init_poll_iocb(poll, mask, io_poll_wake);
        poll->file = req->file;
        req->apoll_events = poll->events;

        ipt->pt._key = mask;
        ipt->req = req;
        ipt->error = 0;
        ipt->nr_entries = 0;
        /*
         * Polling is either completed here or via task_work, so if we're in
         * the task context we're naturally serialised with tw by merit of
         * running the same task. When it's io-wq, take the ownership to
         * prevent tw from running. In the task context we skip taking it as
         * an optimisation.
         *
         * Note: even though the request won't be completed/freed, without
         * ownership we still can race with io_poll_wake().
         * io_poll_can_finish_inline() tries to deal with that.
         */
        ipt->owning = issue_flags & IO_URING_F_UNLOCKED;
        atomic_set(&req->poll_refs, (int)ipt->owning);

        /* io-wq doesn't hold uring_lock */
        if (issue_flags & IO_URING_F_UNLOCKED)
                req->flags &= ~REQ_F_HASH_LOCKED;

        mask = vfs_poll(req->file, &ipt->pt) & poll->events;

        if (unlikely(ipt->error || !ipt->nr_entries)) {
                io_poll_remove_entries(req);

                if (!io_poll_can_finish_inline(req, ipt)) {
                        io_poll_mark_cancelled(req);
                        return 0;
                } else if (mask && (poll->events & EPOLLET)) {
                        ipt->result_mask = mask;
                        return 1;
                }
                return ipt->error ?: -EINVAL;
        }

        if (mask &&
           ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
                if (!io_poll_can_finish_inline(req, ipt))
                        return 0;
                io_poll_remove_entries(req);
                ipt->result_mask = mask;
                /* no one else has access to the req, forget about the ref */
                return 1;
        }

        if (req->flags & REQ_F_HASH_LOCKED)
                io_poll_req_insert_locked(req);
        else
                io_poll_req_insert(req);

        if (mask && (poll->events & EPOLLET) &&
            io_poll_can_finish_inline(req, ipt)) {
                __io_poll_execute(req, mask);
                return 0;
        }

        if (ipt->owning) {
                /*
                 * Try to release ownership. If we see a change of state,
                 * e.g. poll was woken up, queue up a tw; it'll deal with it.
                 */
                if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
                        __io_poll_execute(req, 0);
        }
        return 0;
}

static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
                                struct poll_table_struct *p)
{
        struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
        struct async_poll *apoll = pt->req->apoll;

        __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
}

static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
                                             unsigned issue_flags)
{
        struct io_ring_ctx *ctx = req->ctx;
        struct io_cache_entry *entry;
        struct async_poll *apoll;

        if (req->flags & REQ_F_POLLED) {
                apoll = req->apoll;
                kfree(apoll->double_poll);
        } else if (!(issue_flags & IO_URING_F_UNLOCKED) &&
                   (entry = io_alloc_cache_get(&ctx->apoll_cache)) != NULL) {
                apoll = container_of(entry, struct async_poll, cache);
        } else {
                apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
                if (unlikely(!apoll))
                        return NULL;
        }
        apoll->double_poll = NULL;
        req->apoll = apoll;
        return apoll;
}
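/*
 * io_req_alloc_apoll() above tries three sources in order: reuse the apoll
 * left on the request by a previous arming attempt (REQ_F_POLLED), pull a
 * recycled entry from the per-ring cache when ->uring_lock is held, and
 * only then fall back to a fresh GFP_ATOMIC allocation. Note that only the
 * double entry is freed eagerly here; primary entries are recycled via
 * ctx->apoll_cache and ultimately freed by io_apoll_cache_free() at the
 * bottom of this file.
 */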
int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
{
        const struct io_op_def *def = &io_op_defs[req->opcode];
        struct async_poll *apoll;
        struct io_poll_table ipt;
        __poll_t mask = POLLPRI | POLLERR | EPOLLET;
        int ret;

        /*
         * apoll requests already grab the mutex to complete in the tw
         * handler, so removal from the mutex-backed hash is free, use it
         * by default.
         */
        req->flags |= REQ_F_HASH_LOCKED;

        if (!def->pollin && !def->pollout)
                return IO_APOLL_ABORTED;
        if (!file_can_poll(req->file))
                return IO_APOLL_ABORTED;
        if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
                return IO_APOLL_ABORTED;
        if (!(req->flags & REQ_F_APOLL_MULTISHOT))
                mask |= EPOLLONESHOT;

        if (def->pollin) {
                mask |= EPOLLIN | EPOLLRDNORM;

                /* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
                if (req->flags & REQ_F_CLEAR_POLLIN)
                        mask &= ~EPOLLIN;
        } else {
                mask |= EPOLLOUT | EPOLLWRNORM;
        }
        if (def->poll_exclusive)
                mask |= EPOLLEXCLUSIVE;

        apoll = io_req_alloc_apoll(req, issue_flags);
        if (!apoll)
                return IO_APOLL_ABORTED;
        req->flags |= REQ_F_POLLED;
        ipt.pt._qproc = io_async_queue_proc;

        io_kbuf_recycle(req, issue_flags);

        ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
        if (ret)
                return ret > 0 ? IO_APOLL_READY : IO_APOLL_ABORTED;
        trace_io_uring_poll_arm(req, mask, apoll->poll.events);
        return IO_APOLL_OK;
}

static __cold bool io_poll_remove_all_table(struct task_struct *tsk,
                                            struct io_hash_table *table,
                                            bool cancel_all)
{
        unsigned nr_buckets = 1U << table->hash_bits;
        struct hlist_node *tmp;
        struct io_kiocb *req;
        bool found = false;
        int i;

        for (i = 0; i < nr_buckets; i++) {
                struct io_hash_bucket *hb = &table->hbs[i];

                spin_lock(&hb->lock);
                hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
                        if (io_match_task_safe(req, tsk, cancel_all)) {
                                hlist_del_init(&req->hash_node);
                                io_poll_cancel_req(req);
                                found = true;
                        }
                }
                spin_unlock(&hb->lock);
        }
        return found;
}

/*
 * Returns true if we found and killed one or more poll requests
 */
__cold bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
                               bool cancel_all)
        __must_hold(&ctx->uring_lock)
{
        bool ret;

        ret = io_poll_remove_all_table(tsk, &ctx->cancel_table, cancel_all);
        ret |= io_poll_remove_all_table(tsk, &ctx->cancel_table_locked, cancel_all);
        return ret;
}
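/*
 * Locking contract for the two lookup helpers below (restating what the
 * code does): on success they return with the matching bucket spinlock
 * still held and *out_bucket set so the caller can unlock it; on failure
 * they return NULL with no lock held and *out_bucket == NULL.
 */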
static struct io_kiocb *io_poll_find(struct io_ring_ctx *ctx, bool poll_only,
                                     struct io_cancel_data *cd,
                                     struct io_hash_table *table,
                                     struct io_hash_bucket **out_bucket)
{
        struct io_kiocb *req;
        u32 index = hash_long(cd->data, table->hash_bits);
        struct io_hash_bucket *hb = &table->hbs[index];

        *out_bucket = NULL;

        spin_lock(&hb->lock);
        hlist_for_each_entry(req, &hb->list, hash_node) {
                if (cd->data != req->cqe.user_data)
                        continue;
                if (poll_only && req->opcode != IORING_OP_POLL_ADD)
                        continue;
                if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
                        if (cd->seq == req->work.cancel_seq)
                                continue;
                        req->work.cancel_seq = cd->seq;
                }
                *out_bucket = hb;
                return req;
        }
        spin_unlock(&hb->lock);
        return NULL;
}

static struct io_kiocb *io_poll_file_find(struct io_ring_ctx *ctx,
                                          struct io_cancel_data *cd,
                                          struct io_hash_table *table,
                                          struct io_hash_bucket **out_bucket)
{
        unsigned nr_buckets = 1U << table->hash_bits;
        struct io_kiocb *req;
        int i;

        *out_bucket = NULL;

        for (i = 0; i < nr_buckets; i++) {
                struct io_hash_bucket *hb = &table->hbs[i];

                spin_lock(&hb->lock);
                hlist_for_each_entry(req, &hb->list, hash_node) {
                        if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
                            req->file != cd->file)
                                continue;
                        if (cd->seq == req->work.cancel_seq)
                                continue;
                        req->work.cancel_seq = cd->seq;
                        *out_bucket = hb;
                        return req;
                }
                spin_unlock(&hb->lock);
        }
        return NULL;
}

static int io_poll_disarm(struct io_kiocb *req)
{
        if (!req)
                return -ENOENT;
        if (!io_poll_get_ownership(req))
                return -EALREADY;
        io_poll_remove_entries(req);
        hash_del(&req->hash_node);
        return 0;
}

static int __io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
                            struct io_hash_table *table)
{
        struct io_hash_bucket *bucket;
        struct io_kiocb *req;

        if (cd->flags & (IORING_ASYNC_CANCEL_FD|IORING_ASYNC_CANCEL_ANY))
                req = io_poll_file_find(ctx, cd, table, &bucket);
        else
                req = io_poll_find(ctx, false, cd, table, &bucket);

        if (req)
                io_poll_cancel_req(req);
        if (bucket)
                spin_unlock(&bucket->lock);
        return req ? 0 : -ENOENT;
}

int io_poll_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
                   unsigned issue_flags)
{
        int ret;

        ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table);
        if (ret != -ENOENT)
                return ret;

        io_ring_submit_lock(ctx, issue_flags);
        ret = __io_poll_cancel(ctx, cd, &ctx->cancel_table_locked);
        io_ring_submit_unlock(ctx, issue_flags);
        return ret;
}

static __poll_t io_poll_parse_events(const struct io_uring_sqe *sqe,
                                     unsigned int flags)
{
        u32 events;

        events = READ_ONCE(sqe->poll32_events);
#ifdef __BIG_ENDIAN
        events = swahw32(events);
#endif
        if (!(flags & IORING_POLL_ADD_MULTI))
                events |= EPOLLONESHOT;
        if (!(flags & IORING_POLL_ADD_LEVEL))
                events |= EPOLLET;
        return demangle_poll(events) |
                (events & (EPOLLEXCLUSIVE|EPOLLONESHOT|EPOLLET));
}

int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
        u32 flags;

        if (sqe->buf_index || sqe->splice_fd_in)
                return -EINVAL;
        flags = READ_ONCE(sqe->len);
        if (flags & ~(IORING_POLL_UPDATE_EVENTS | IORING_POLL_UPDATE_USER_DATA |
                      IORING_POLL_ADD_MULTI))
                return -EINVAL;
        /* meaningless without update */
        if (flags == IORING_POLL_ADD_MULTI)
                return -EINVAL;

        upd->old_user_data = READ_ONCE(sqe->addr);
        upd->update_events = flags & IORING_POLL_UPDATE_EVENTS;
        upd->update_user_data = flags & IORING_POLL_UPDATE_USER_DATA;

        upd->new_user_data = READ_ONCE(sqe->off);
        if (!upd->update_user_data && upd->new_user_data)
                return -EINVAL;
        if (upd->update_events)
                upd->events = io_poll_parse_events(sqe, flags);
        else if (sqe->poll32_events)
                return -EINVAL;

        return 0;
}

int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
        u32 flags;

        if (sqe->buf_index || sqe->off || sqe->addr)
                return -EINVAL;
        flags = READ_ONCE(sqe->len);
        if (flags & ~IORING_POLL_ADD_MULTI)
                return -EINVAL;
        if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
                return -EINVAL;

        poll->events = io_poll_parse_events(sqe, flags);
        return 0;
}
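/*
 * To illustrate io_poll_parse_events() above (descriptive only): a plain
 * POLLIN request without IORING_POLL_ADD_MULTI parses to
 * EPOLLIN|EPOLLONESHOT|EPOLLET, i.e. single-shot and edge-triggered by
 * default; IORING_POLL_ADD_MULTI drops EPOLLONESHOT and
 * IORING_POLL_ADD_LEVEL drops EPOLLET.
 */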
int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
        struct io_poll_table ipt;
        int ret;

        ipt.pt._qproc = io_poll_queue_proc;

        /*
         * If sqpoll or single issuer, there is no contention for ->uring_lock
         * and we'll end up holding it in tw handlers anyway.
         */
        if (req->ctx->flags & (IORING_SETUP_SQPOLL|IORING_SETUP_SINGLE_ISSUER))
                req->flags |= REQ_F_HASH_LOCKED;

        ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
        if (ret > 0) {
                io_req_set_res(req, ipt.result_mask, 0);
                return IOU_OK;
        }
        return ret ?: IOU_ISSUE_SKIP_COMPLETE;
}

int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
        struct io_cancel_data cd = { .data = poll_update->old_user_data, };
        struct io_ring_ctx *ctx = req->ctx;
        struct io_hash_bucket *bucket;
        struct io_kiocb *preq;
        int ret2, ret = 0;
        bool locked;

        preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table, &bucket);
        ret2 = io_poll_disarm(preq);
        if (bucket)
                spin_unlock(&bucket->lock);
        if (!ret2)
                goto found;
        if (ret2 != -ENOENT) {
                ret = ret2;
                goto out;
        }

        io_ring_submit_lock(ctx, issue_flags);
        preq = io_poll_find(ctx, true, &cd, &ctx->cancel_table_locked, &bucket);
        ret2 = io_poll_disarm(preq);
        if (bucket)
                spin_unlock(&bucket->lock);
        io_ring_submit_unlock(ctx, issue_flags);
        if (ret2) {
                ret = ret2;
                goto out;
        }

found:
        if (WARN_ON_ONCE(preq->opcode != IORING_OP_POLL_ADD)) {
                ret = -EFAULT;
                goto out;
        }

        if (poll_update->update_events || poll_update->update_user_data) {
                /* only replace the low event bits, keep the behavior flags */
                if (poll_update->update_events) {
                        struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);

                        poll->events &= ~0xffff;
                        poll->events |= poll_update->events & 0xffff;
                        poll->events |= IO_POLL_UNMASK;
                }
                if (poll_update->update_user_data)
                        preq->cqe.user_data = poll_update->new_user_data;

                ret2 = io_poll_add(preq, issue_flags);
                /* successfully updated, don't complete poll request */
                if (!ret2 || ret2 == -EIOCBQUEUED)
                        goto out;
        }

        req_set_fail(preq);
        io_req_set_res(preq, -ECANCELED, 0);
        locked = !(issue_flags & IO_URING_F_UNLOCKED);
        io_req_task_complete(preq, &locked);
out:
        if (ret < 0) {
                req_set_fail(req);
                return ret;
        }
        /* complete update request, we're done with it */
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}

void io_apoll_cache_free(struct io_cache_entry *entry)
{
        kfree(container_of(entry, struct async_poll, cache));
}