// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "cancel.h"
#include "timeout.h"

struct io_timeout {
        struct file *file;
        u32 off;
        u32 target_seq;
        u32 repeats;
        struct list_head list;
        /* head of the link, used by linked timeouts only */
        struct io_kiocb *head;
        /* for linked completions */
        struct io_kiocb *prev;
};

struct io_timeout_rem {
        struct file *file;
        u64 addr;

        /* timeout update */
        struct timespec64 ts;
        u32 flags;
        bool ltimeout;
};

static struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
                                                   struct io_kiocb *link);

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
        struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        struct io_timeout_data *data = req->async_data;

        return !timeout->off || data->flags & IORING_TIMEOUT_MULTISHOT;
}

static inline void io_put_req(struct io_kiocb *req)
{
        if (req_ref_put_and_test(req)) {
                io_queue_next(req);
                io_free_req(req);
        }
}

static inline bool io_timeout_finish(struct io_timeout *timeout,
                                     struct io_timeout_data *data)
{
        if (!(data->flags & IORING_TIMEOUT_MULTISHOT))
                return true;

        if (!timeout->off || (timeout->repeats && --timeout->repeats))
                return false;

        return true;
}

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer);

static void io_timeout_complete(struct io_kiocb *req, io_tw_token_t tw)
{
        struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        struct io_timeout_data *data = req->async_data;
        struct io_ring_ctx *ctx = req->ctx;

        if (!io_timeout_finish(timeout, data)) {
                if (io_req_post_cqe(req, -ETIME, IORING_CQE_F_MORE)) {
                        /* re-arm timer */
                        raw_spin_lock_irq(&ctx->timeout_lock);
                        list_add(&timeout->list, ctx->timeout_list.prev);
                        hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
                        raw_spin_unlock_irq(&ctx->timeout_lock);
                        return;
                }
        }

        io_req_task_complete(req, tw);
}

static __cold bool io_flush_killed_timeouts(struct list_head *list, int err)
{
        if (list_empty(list))
                return false;

        while (!list_empty(list)) {
                struct io_timeout *timeout;
                struct io_kiocb *req;

                timeout = list_first_entry(list, struct io_timeout, list);
                list_del_init(&timeout->list);
                req = cmd_to_io_kiocb(timeout);
                if (err)
                        req_set_fail(req);
                io_req_queue_tw_complete(req, err);
        }

        return true;
}

static void io_kill_timeout(struct io_kiocb *req, struct list_head *list)
        __must_hold(&req->ctx->timeout_lock)
{
        struct io_timeout_data *io = req->async_data;

        if (hrtimer_try_to_cancel(&io->timer) != -1) {
                struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);

                atomic_set(&req->ctx->cq_timeouts,
                        atomic_read(&req->ctx->cq_timeouts) + 1);
                list_move_tail(&timeout->list, list);
        }
}

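/*
 * Worked example of the wrap-safe sequence check in the flush loop below:
 * with cq_last_tm_flush == 0xfffffff0, a timeout armed for 0x20 more events
 * has target_seq == 0x00000010. At a current seq of 0x00000005 this gives
 * events_needed == 0x20 and events_got == 0x15 (both computed modulo 2^32),
 * so events_got < events_needed and the timeout is correctly treated as not
 * yet due even though the raw counters have wrapped past zero.
 */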
__cold void io_flush_timeouts(struct io_ring_ctx *ctx)
{
        struct io_timeout *timeout, *tmp;
        LIST_HEAD(list);
        u32 seq;

        raw_spin_lock_irq(&ctx->timeout_lock);
        seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);

        list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
                struct io_kiocb *req = cmd_to_io_kiocb(timeout);
                u32 events_needed, events_got;

                if (io_is_timeout_noseq(req))
                        break;

                /*
                 * Since seq can easily wrap around over time, subtract
                 * the last seq at which timeouts were flushed before comparing.
                 * Assuming not more than 2^31-1 events have happened since,
                 * these subtractions won't have wrapped, so we can check if
                 * target is in [last_seq, current_seq] by comparing the two.
                 */
                events_needed = timeout->target_seq - ctx->cq_last_tm_flush;
                events_got = seq - ctx->cq_last_tm_flush;
                if (events_got < events_needed)
                        break;

                io_kill_timeout(req, &list);
        }
        ctx->cq_last_tm_flush = seq;
        raw_spin_unlock_irq(&ctx->timeout_lock);
        io_flush_killed_timeouts(&list, 0);
}

static void io_req_tw_fail_links(struct io_kiocb *link, io_tw_token_t tw)
{
        io_tw_lock(link->ctx, tw);
        while (link) {
                struct io_kiocb *nxt = link->link;
                long res = -ECANCELED;

                if (link->flags & REQ_F_FAIL)
                        res = link->cqe.res;
                link->link = NULL;
                io_req_set_res(link, res, 0);
                io_req_task_complete(link, tw);
                link = nxt;
        }
}

static void io_fail_links(struct io_kiocb *req)
        __must_hold(&req->ctx->completion_lock)
{
        struct io_kiocb *link = req->link;
        bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;

        if (!link)
                return;

        while (link) {
                if (ignore_cqes)
                        link->flags |= REQ_F_CQE_SKIP;
                else
                        link->flags &= ~REQ_F_CQE_SKIP;
                trace_io_uring_fail_link(req, link);
                link = link->link;
        }

        link = req->link;
        link->io_task_work.func = io_req_tw_fail_links;
        io_req_task_work_add(link);
        req->link = NULL;
}

static inline void io_remove_next_linked(struct io_kiocb *req)
{
        struct io_kiocb *nxt = req->link;

        req->link = nxt->link;
        nxt->link = NULL;
}

void io_disarm_next(struct io_kiocb *req)
        __must_hold(&req->ctx->completion_lock)
{
        struct io_kiocb *link = NULL;

        if (req->flags & REQ_F_ARM_LTIMEOUT) {
                link = req->link;
                req->flags &= ~REQ_F_ARM_LTIMEOUT;
                if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
                        io_remove_next_linked(req);
                        io_req_queue_tw_complete(link, -ECANCELED);
                }
        } else if (req->flags & REQ_F_LINK_TIMEOUT) {
                struct io_ring_ctx *ctx = req->ctx;

                raw_spin_lock_irq(&ctx->timeout_lock);
                if (req->link && req->link->opcode == IORING_OP_LINK_TIMEOUT)
                        link = __io_disarm_linked_timeout(req, req->link);

                raw_spin_unlock_irq(&ctx->timeout_lock);
                if (link)
                        io_req_queue_tw_complete(link, -ECANCELED);
        }
        if (unlikely((req->flags & REQ_F_FAIL) &&
                     !(req->flags & REQ_F_HARDLINK)))
                io_fail_links(req);
}

static struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
                                                   struct io_kiocb *link)
        __must_hold(&req->ctx->completion_lock)
        __must_hold(&req->ctx->timeout_lock)
{
        struct io_timeout_data *io = link->async_data;
        struct io_timeout *timeout = io_kiocb_to_cmd(link, struct io_timeout);

        io_remove_next_linked(req);
        timeout->head = NULL;
        if (hrtimer_try_to_cancel(&io->timer) != -1) {
                list_del(&timeout->list);
                return link;
        }

        return NULL;
}

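/*
 * hrtimer callback for a regular (non-linked) timeout. Runs in hard-irq
 * context: it removes the request from ->timeout_list, accounts the expiry
 * in ->cq_timeouts and punts the completion (-ETIME, or a multishot re-arm)
 * to task work via io_timeout_complete().
 */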
static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
        struct io_timeout_data *data = container_of(timer,
                                                struct io_timeout_data, timer);
        struct io_kiocb *req = data->req;
        struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        struct io_ring_ctx *ctx = req->ctx;
        unsigned long flags;

        raw_spin_lock_irqsave(&ctx->timeout_lock, flags);
        list_del_init(&timeout->list);
        atomic_set(&req->ctx->cq_timeouts,
                atomic_read(&req->ctx->cq_timeouts) + 1);
        raw_spin_unlock_irqrestore(&ctx->timeout_lock, flags);

        if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
                req_set_fail(req);

        io_req_set_res(req, -ETIME, 0);
        req->io_task_work.func = io_timeout_complete;
        io_req_task_work_add(req);
        return HRTIMER_NORESTART;
}

static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
                                           struct io_cancel_data *cd)
        __must_hold(&ctx->timeout_lock)
{
        struct io_timeout *timeout;
        struct io_timeout_data *io;
        struct io_kiocb *req = NULL;

        list_for_each_entry(timeout, &ctx->timeout_list, list) {
                struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

                if (io_cancel_req_match(tmp, cd)) {
                        req = tmp;
                        break;
                }
        }
        if (!req)
                return ERR_PTR(-ENOENT);

        io = req->async_data;
        if (hrtimer_try_to_cancel(&io->timer) == -1)
                return ERR_PTR(-EALREADY);
        timeout = io_kiocb_to_cmd(req, struct io_timeout);
        list_del_init(&timeout->list);
        return req;
}

int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
        __must_hold(&ctx->completion_lock)
{
        struct io_kiocb *req;

        raw_spin_lock_irq(&ctx->timeout_lock);
        req = io_timeout_extract(ctx, cd);
        raw_spin_unlock_irq(&ctx->timeout_lock);

        if (IS_ERR(req))
                return PTR_ERR(req);
        io_req_task_queue_fail(req, -ECANCELED);
        return 0;
}

static void io_req_task_link_timeout(struct io_kiocb *req, io_tw_token_t tw)
{
        struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        struct io_kiocb *prev = timeout->prev;
        int ret;

        if (prev) {
                if (!io_should_terminate_tw()) {
                        struct io_cancel_data cd = {
                                .ctx = req->ctx,
                                .data = prev->cqe.user_data,
                        };

                        ret = io_try_cancel(req->tctx, &cd, 0);
                } else {
                        ret = -ECANCELED;
                }
                io_req_set_res(req, ret ?: -ETIME, 0);
                io_req_task_complete(req, tw);
                io_put_req(prev);
        } else {
                io_req_set_res(req, -ETIME, 0);
                io_req_task_complete(req, tw);
        }
}

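/*
 * hrtimer callback for a linked timeout. If the request it guards
 * (timeout->head) is still alive, take a reference to it and punt the
 * cancellation attempt to task work via io_req_task_link_timeout();
 * otherwise only the timeout's own completion is posted.
 */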
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
        struct io_timeout_data *data = container_of(timer,
                                                struct io_timeout_data, timer);
        struct io_kiocb *prev, *req = data->req;
        struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        struct io_ring_ctx *ctx = req->ctx;
        unsigned long flags;

        raw_spin_lock_irqsave(&ctx->timeout_lock, flags);
        prev = timeout->head;
        timeout->head = NULL;

        /*
         * We don't expect the list to be empty, that will only happen if we
         * race with the completion of the linked work.
         */
        if (prev) {
                io_remove_next_linked(prev);
                if (!req_ref_inc_not_zero(prev))
                        prev = NULL;
        }
        list_del(&timeout->list);
        timeout->prev = prev;
        raw_spin_unlock_irqrestore(&ctx->timeout_lock, flags);

        req->io_task_work.func = io_req_task_link_timeout;
        io_req_task_work_add(req);
        return HRTIMER_NORESTART;
}

static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
{
        switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
        case IORING_TIMEOUT_BOOTTIME:
                return CLOCK_BOOTTIME;
        case IORING_TIMEOUT_REALTIME:
                return CLOCK_REALTIME;
        default:
                /* can't happen, vetted at prep time */
                WARN_ON_ONCE(1);
                fallthrough;
        case 0:
                return CLOCK_MONOTONIC;
        }
}

static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
                                    struct timespec64 *ts, enum hrtimer_mode mode)
        __must_hold(&ctx->timeout_lock)
{
        struct io_timeout_data *io;
        struct io_timeout *timeout;
        struct io_kiocb *req = NULL;

        list_for_each_entry(timeout, &ctx->ltimeout_list, list) {
                struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

                if (user_data == tmp->cqe.user_data) {
                        req = tmp;
                        break;
                }
        }
        if (!req)
                return -ENOENT;

        io = req->async_data;
        if (hrtimer_try_to_cancel(&io->timer) == -1)
                return -EALREADY;
        hrtimer_setup(&io->timer, io_link_timeout_fn, io_timeout_get_clock(io), mode);
        hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
        return 0;
}

static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
                             struct timespec64 *ts, enum hrtimer_mode mode)
        __must_hold(&ctx->timeout_lock)
{
        struct io_cancel_data cd = { .ctx = ctx, .data = user_data, };
        struct io_kiocb *req = io_timeout_extract(ctx, &cd);
        struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        struct io_timeout_data *data;

        if (IS_ERR(req))
                return PTR_ERR(req);

        timeout->off = 0; /* noseq */
        data = req->async_data;
        data->ts = *ts;

        list_add_tail(&timeout->list, &ctx->timeout_list);
        hrtimer_setup(&data->timer, io_timeout_fn, io_timeout_get_clock(data), mode);
        hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), mode);
        return 0;
}

int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);

        if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
                return -EINVAL;
        if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
                return -EINVAL;

        tr->ltimeout = false;
        tr->addr = READ_ONCE(sqe->addr);
        tr->flags = READ_ONCE(sqe->timeout_flags);
        if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
                if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
                        return -EINVAL;
                if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
                        tr->ltimeout = true;
                if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
                        return -EINVAL;
                if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
                        return -EFAULT;
                if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0)
                        return -EINVAL;
        } else if (tr->flags) {
                /* timeout removal doesn't support flags */
                return -EINVAL;
        }

        return 0;
}

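/*
 * Userspace view of the update path above, as a minimal liburing sketch
 * (illustrative only, not part of this file; the ring and an earlier timeout
 * submitted with user_data 0x1234 are assumed to exist):
 *
 *      struct __kernel_timespec ts = { .tv_sec = 1 };
 *      struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *      io_uring_prep_timeout_update(sqe, &ts, 0x1234, 0);
 *      io_uring_submit(&ring);
 *
 * The kernel looks the original timeout up by user_data under ->timeout_lock
 * and restarts its hrtimer with the new expiry.
 */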
static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
{
        return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
                                            : HRTIMER_MODE_REL;
}

/*
 * Remove or update an existing timeout command
 */
int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
        struct io_ring_ctx *ctx = req->ctx;
        int ret;

        if (!(tr->flags & IORING_TIMEOUT_UPDATE)) {
                struct io_cancel_data cd = { .ctx = ctx, .data = tr->addr, };

                spin_lock(&ctx->completion_lock);
                ret = io_timeout_cancel(ctx, &cd);
                spin_unlock(&ctx->completion_lock);
        } else {
                enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);

                raw_spin_lock_irq(&ctx->timeout_lock);
                if (tr->ltimeout)
                        ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
                else
                        ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
                raw_spin_unlock_irq(&ctx->timeout_lock);
        }

        if (ret < 0)
                req_set_fail(req);
        io_req_set_res(req, ret, 0);
        return IOU_COMPLETE;
}

static int __io_timeout_prep(struct io_kiocb *req,
                             const struct io_uring_sqe *sqe,
                             bool is_timeout_link)
{
        struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        struct io_timeout_data *data;
        unsigned flags;
        u32 off = READ_ONCE(sqe->off);

        if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
                return -EINVAL;
        if (off && is_timeout_link)
                return -EINVAL;
        flags = READ_ONCE(sqe->timeout_flags);
        if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
                      IORING_TIMEOUT_ETIME_SUCCESS |
                      IORING_TIMEOUT_MULTISHOT))
                return -EINVAL;
        /* more than one clock specified is invalid, obviously */
        if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
                return -EINVAL;
        /* multishot requests only make sense with rel values */
        if (!(~flags & (IORING_TIMEOUT_MULTISHOT | IORING_TIMEOUT_ABS)))
                return -EINVAL;

        INIT_LIST_HEAD(&timeout->list);
        timeout->off = off;
        if (unlikely(off && !req->ctx->off_timeout_used))
                req->ctx->off_timeout_used = true;
        /*
         * for multishot reqs w/ fixed nr of repeats, repeats tracks the
         * remaining nr
         */
        timeout->repeats = 0;
        if ((flags & IORING_TIMEOUT_MULTISHOT) && off > 0)
                timeout->repeats = off;

        if (WARN_ON_ONCE(req_has_async_data(req)))
                return -EFAULT;
        data = io_uring_alloc_async_data(NULL, req);
        if (!data)
                return -ENOMEM;
        data->req = req;
        data->flags = flags;

        if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
                return -EFAULT;

        if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
                return -EINVAL;

        data->mode = io_translate_timeout_mode(flags);

        if (is_timeout_link) {
                struct io_submit_link *link = &req->ctx->submit_state.link;

                if (!link->head)
                        return -EINVAL;
                if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
                        return -EINVAL;
                timeout->head = link->last;
                link->last->flags |= REQ_F_ARM_LTIMEOUT;
                hrtimer_setup(&data->timer, io_link_timeout_fn, io_timeout_get_clock(data),
                              data->mode);
        } else {
                hrtimer_setup(&data->timer, io_timeout_fn, io_timeout_get_clock(data), data->mode);
        }
        return 0;
}

int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return __io_timeout_prep(req, sqe, false);
}

int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return __io_timeout_prep(req, sqe, true);
}

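/*
 * Typical userspace pairing for the linked-timeout prep above, as a minimal
 * liburing sketch (illustrative only, not part of this file; ring, fd and
 * buf are assumed to exist):
 *
 *      struct __kernel_timespec ts = { .tv_nsec = 500 * 1000 * 1000 };
 *      struct io_uring_sqe *sqe;
 *
 *      sqe = io_uring_get_sqe(&ring);
 *      io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
 *      sqe->flags |= IOSQE_IO_LINK;
 *
 *      sqe = io_uring_get_sqe(&ring);
 *      io_uring_prep_link_timeout(sqe, &ts, 0);
 *      io_uring_submit(&ring);
 *
 * If the read does not complete within 500ms, io_link_timeout_fn() fires and
 * cancellation of the read is punted to task work; if the read completes
 * first, the timeout is disarmed in io_disarm_next() and posts -ECANCELED.
 */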
int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        struct io_ring_ctx *ctx = req->ctx;
        struct io_timeout_data *data = req->async_data;
        struct list_head *entry;
        u32 tail, off = timeout->off;

        raw_spin_lock_irq(&ctx->timeout_lock);

        /*
         * sqe->off holds how many events need to occur for this timeout
         * event to be satisfied. If it isn't set, then this is a pure
         * timeout request, sequence isn't used.
         */
        if (io_is_timeout_noseq(req)) {
                entry = ctx->timeout_list.prev;
                goto add;
        }

        tail = data_race(ctx->cached_cq_tail) - atomic_read(&ctx->cq_timeouts);
        timeout->target_seq = tail + off;

        /* Update the last seq here in case io_flush_timeouts() hasn't.
         * This is safe because ->completion_lock is held, and submissions
         * and completions are never mixed in the same ->completion_lock section.
         */
        ctx->cq_last_tm_flush = tail;

        /*
         * Insertion sort, ensuring the first entry in the list is always
         * the one we need first.
         */
        list_for_each_prev(entry, &ctx->timeout_list) {
                struct io_timeout *nextt = list_entry(entry, struct io_timeout, list);
                struct io_kiocb *nxt = cmd_to_io_kiocb(nextt);

                if (io_is_timeout_noseq(nxt))
                        continue;
                /* nxt.seq is behind @tail, otherwise would've been completed */
                if (off >= nextt->target_seq - tail)
                        break;
        }
add:
        list_add(&timeout->list, entry);
        hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
        raw_spin_unlock_irq(&ctx->timeout_lock);
        return IOU_ISSUE_SKIP_COMPLETE;
}

void io_queue_linked_timeout(struct io_kiocb *req)
{
        struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        struct io_ring_ctx *ctx = req->ctx;

        raw_spin_lock_irq(&ctx->timeout_lock);
        /*
         * If the back reference is NULL, then our linked request finished
         * before we got a chance to set up the timer
         */
        if (timeout->head) {
                struct io_timeout_data *data = req->async_data;

                hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
                              data->mode);
                list_add_tail(&timeout->list, &ctx->ltimeout_list);
        }
        raw_spin_unlock_irq(&ctx->timeout_lock);
        /* drop submission reference */
        io_put_req(req);
}

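/*
 * Decide whether @head should be cancelled on behalf of @tctx: requests
 * owned by another task never match, cancel_all matches everything else,
 * and otherwise only link chains containing a REQ_F_INFLIGHT request match.
 */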
static bool io_match_task(struct io_kiocb *head, struct io_uring_task *tctx,
                          bool cancel_all)
        __must_hold(&head->ctx->timeout_lock)
{
        struct io_kiocb *req;

        if (tctx && head->tctx != tctx)
                return false;
        if (cancel_all)
                return true;

        io_for_each_link(req, head) {
                if (req->flags & REQ_F_INFLIGHT)
                        return true;
        }
        return false;
}

/* Returns true if we found and killed one or more timeouts */
__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
                             bool cancel_all)
{
        struct io_timeout *timeout, *tmp;
        LIST_HEAD(list);

        /*
         * completion_lock is needed for io_match_task(). Take it before
         * timeout_lock to keep the locking order.
         */
        spin_lock(&ctx->completion_lock);
        raw_spin_lock_irq(&ctx->timeout_lock);
        list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
                struct io_kiocb *req = cmd_to_io_kiocb(timeout);

                if (io_match_task(req, tctx, cancel_all))
                        io_kill_timeout(req, &list);
        }
        raw_spin_unlock_irq(&ctx->timeout_lock);
        spin_unlock(&ctx->completion_lock);

        return io_flush_killed_timeouts(&list, -ECANCELED);
}