// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "cancel.h"
#include "timeout.h"

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	u32				repeats;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
	/* for linked completions */
	struct io_kiocb			*prev;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
	bool				ltimeout;
};

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;

	return !timeout->off || data->flags & IORING_TIMEOUT_MULTISHOT;
}

static inline void io_put_req(struct io_kiocb *req)
{
	if (req_ref_put_and_test(req)) {
		io_queue_next(req);
		io_free_req(req);
	}
}

static inline bool io_timeout_finish(struct io_timeout *timeout,
				     struct io_timeout_data *data)
{
	if (!(data->flags & IORING_TIMEOUT_MULTISHOT))
		return true;

	if (!timeout->off || (timeout->repeats && --timeout->repeats))
		return false;

	return true;
}

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer);

static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;

	if (!io_timeout_finish(timeout, data)) {
		if (io_req_post_cqe(req, -ETIME, IORING_CQE_F_MORE)) {
			/* re-arm timer */
			raw_spin_lock_irq(&ctx->timeout_lock);
			list_add(&timeout->list, ctx->timeout_list.prev);
			hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
			raw_spin_unlock_irq(&ctx->timeout_lock);
			return;
		}
	}

	io_req_task_complete(req, ts);
}

static __cold bool io_flush_killed_timeouts(struct list_head *list, int err)
{
	if (list_empty(list))
		return false;

	while (!list_empty(list)) {
		struct io_timeout *timeout;
		struct io_kiocb *req;

		timeout = list_first_entry(list, struct io_timeout, list);
		list_del_init(&timeout->list);
		req = cmd_to_io_kiocb(timeout);
		if (err)
			req_set_fail(req);
		io_req_queue_tw_complete(req, err);
	}

	return true;
}

static void io_kill_timeout(struct io_kiocb *req, struct list_head *list)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = req->async_data;

	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);

		atomic_set(&req->ctx->cq_timeouts,
			atomic_read(&req->ctx->cq_timeouts) + 1);
		list_move_tail(&timeout->list, list);
	}
}
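
/*
 * Usage sketch (illustrative only, kept out of the build): one way userspace
 * might arm the multishot timeout that io_timeout_complete() re-arms above.
 * Assumes liburing's io_uring_prep_timeout() helper and a kernel that
 * supports IORING_TIMEOUT_MULTISHOT; the CQE flow described in the comments
 * mirrors this file's behaviour rather than a guaranteed uAPI contract.
 */
#if 0
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int i;

	if (io_uring_queue_init(8, &ring, 0))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	/* count=3 bounds the number of shots; see timeout->repeats above */
	io_uring_prep_timeout(sqe, &ts, 3, IORING_TIMEOUT_MULTISHOT);
	io_uring_submit(&ring);

	for (i = 0; i < 3; i++) {
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		/* each shot posts -ETIME; F_MORE is set while more follow */
		printf("res=%d more=%d\n", cqe->res,
		       !!(cqe->flags & IORING_CQE_F_MORE));
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}
#endif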

__cold void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	struct io_timeout *timeout, *tmp;
	LIST_HEAD(list);
	u32 seq;

	raw_spin_lock_irq(&ctx->timeout_lock);
	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);

	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);
		u32 events_needed, events_got;

		if (io_is_timeout_noseq(req))
			break;

		/*
		 * Since seq can easily wrap around over time, subtract
		 * the last seq at which timeouts were flushed before comparing.
		 * Assuming not more than 2^31-1 events have happened since,
		 * these subtractions won't have wrapped, so we can check if
		 * target is in [last_seq, current_seq] by comparing the two.
		 */
		events_needed = timeout->target_seq - ctx->cq_last_tm_flush;
		events_got = seq - ctx->cq_last_tm_flush;
		if (events_got < events_needed)
			break;

		io_kill_timeout(req, &list);
	}
	ctx->cq_last_tm_flush = seq;
	raw_spin_unlock_irq(&ctx->timeout_lock);
	io_flush_killed_timeouts(&list, 0);
}

static void io_req_tw_fail_links(struct io_kiocb *link, struct io_tw_state *ts)
{
	io_tw_lock(link->ctx, ts);
	while (link) {
		struct io_kiocb *nxt = link->link;
		long res = -ECANCELED;

		if (link->flags & REQ_F_FAIL)
			res = link->cqe.res;
		link->link = NULL;
		io_req_set_res(link, res, 0);
		io_req_task_complete(link, ts);
		link = nxt;
	}
}

static void io_fail_links(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = req->link;
	bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;

	if (!link)
		return;

	while (link) {
		if (ignore_cqes)
			link->flags |= REQ_F_CQE_SKIP;
		else
			link->flags &= ~REQ_F_CQE_SKIP;
		trace_io_uring_fail_link(req, link);
		link = link->link;
	}

	link = req->link;
	link->io_task_work.func = io_req_tw_fail_links;
	io_req_task_work_add(link);
	req->link = NULL;
}

static inline void io_remove_next_linked(struct io_kiocb *req)
{
	struct io_kiocb *nxt = req->link;

	req->link = nxt->link;
	nxt->link = NULL;
}

void io_disarm_next(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = NULL;

	if (req->flags & REQ_F_ARM_LTIMEOUT) {
		link = req->link;
		req->flags &= ~REQ_F_ARM_LTIMEOUT;
		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
			io_remove_next_linked(req);
			io_req_queue_tw_complete(link, -ECANCELED);
		}
	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;

		raw_spin_lock_irq(&ctx->timeout_lock);
		link = io_disarm_linked_timeout(req);
		raw_spin_unlock_irq(&ctx->timeout_lock);
		if (link)
			io_req_queue_tw_complete(link, -ECANCELED);
	}
	if (unlikely((req->flags & REQ_F_FAIL) &&
		     !(req->flags & REQ_F_HARDLINK)))
		io_fail_links(req);
}

struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
					    struct io_kiocb *link)
	__must_hold(&req->ctx->completion_lock)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = link->async_data;
	struct io_timeout *timeout = io_kiocb_to_cmd(link, struct io_timeout);

	io_remove_next_linked(req);
	timeout->head = NULL;
	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		list_del(&timeout->list);
		return link;
	}

	return NULL;
}
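
/*
 * Worked example (illustrative only, kept out of the build): the wrap-safe
 * sequence comparison described in the comment inside io_flush_timeouts(),
 * reproduced as plain userspace C with hypothetical sample values. It only
 * demonstrates the unsigned-subtraction trick; it is not kernel code.
 */
#if 0
#include <assert.h>
#include <stdint.h>

/* Has @target been reached, given that at most 2^31 - 1 events have
 * elapsed since @last_flush?  Same arithmetic as io_flush_timeouts(). */
static int target_reached(uint32_t target, uint32_t last_flush, uint32_t cur)
{
	uint32_t events_needed = target - last_flush;
	uint32_t events_got = cur - last_flush;

	return events_got >= events_needed;
}

int main(void)
{
	/* plain case: target 15 lies within [10, 20] */
	assert(target_reached(15, 10, 20));
	/* not enough completions have been posted yet */
	assert(!target_reached(25, 10, 20));
	/* CQ tail wrapped past UINT32_MAX; the subtractions still work */
	assert(target_reached(5, 0xfffffff0u, 10));
	assert(!target_reached(50, 0xfffffff0u, 10));
	return 0;
}
#endif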

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->timeout_lock, flags);
	list_del_init(&timeout->list);
	atomic_set(&req->ctx->cq_timeouts,
		atomic_read(&req->ctx->cq_timeouts) + 1);
	raw_spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
		req_set_fail(req);

	io_req_set_res(req, -ETIME, 0);
	req->io_task_work.func = io_timeout_complete;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}

static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
					   struct io_cancel_data *cd)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout *timeout;
	struct io_timeout_data *io;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->timeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (io_cancel_req_match(tmp, cd)) {
			req = tmp;
			break;
		}
	}
	if (!req)
		return ERR_PTR(-ENOENT);

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return ERR_PTR(-EALREADY);
	timeout = io_kiocb_to_cmd(req, struct io_timeout);
	list_del_init(&timeout->list);
	return req;
}

int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
	__must_hold(&ctx->completion_lock)
{
	struct io_kiocb *req;

	raw_spin_lock_irq(&ctx->timeout_lock);
	req = io_timeout_extract(ctx, cd);
	raw_spin_unlock_irq(&ctx->timeout_lock);

	if (IS_ERR(req))
		return PTR_ERR(req);
	io_req_task_queue_fail(req, -ECANCELED);
	return 0;
}

static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_kiocb *prev = timeout->prev;
	int ret;

	if (prev) {
		if (!io_should_terminate_tw()) {
			struct io_cancel_data cd = {
				.ctx		= req->ctx,
				.data		= prev->cqe.user_data,
			};

			ret = io_try_cancel(req->tctx, &cd, 0);
		} else {
			ret = -ECANCELED;
		}
		io_req_set_res(req, ret ?: -ETIME, 0);
		io_req_task_complete(req, ts);
		io_put_req(prev);
	} else {
		io_req_set_res(req, -ETIME, 0);
		io_req_task_complete(req, ts);
	}
}
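
/*
 * Usage sketch (illustrative only, kept out of the build): cancelling a
 * pending timeout by user_data, which lands in io_timeout_cancel() above.
 * Assumes liburing's io_uring_prep_timeout() / io_uring_prep_timeout_remove()
 * helpers; the user_data values are arbitrary examples.
 */
#if 0
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct __kernel_timespec ts = { .tv_sec = 60 };
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int i;

	if (io_uring_queue_init(8, &ring, 0))
		return 1;

	/* arm a long timeout tagged with user_data 0x1234 */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_timeout(sqe, &ts, 0, 0);
	sqe->user_data = 0x1234;

	/* ask the kernel to cancel it */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_timeout_remove(sqe, 0x1234, 0);
	sqe->user_data = 0x5678;

	io_uring_submit(&ring);

	/* expect -ECANCELED for 0x1234, 0 (or -ENOENT/-EALREADY) for 0x5678 */
	for (i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		printf("user_data=%llu res=%d\n",
		       (unsigned long long)cqe->user_data, cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}
#endif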

static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *prev, *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->timeout_lock, flags);
	prev = timeout->head;
	timeout->head = NULL;

	/*
	 * We don't expect the list to be empty, that will only happen if we
	 * race with the completion of the linked work.
	 */
	if (prev) {
		io_remove_next_linked(prev);
		if (!req_ref_inc_not_zero(prev))
			prev = NULL;
	}
	list_del(&timeout->list);
	timeout->prev = prev;
	raw_spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	req->io_task_work.func = io_req_task_link_timeout;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}

static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
{
	switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
	case IORING_TIMEOUT_BOOTTIME:
		return CLOCK_BOOTTIME;
	case IORING_TIMEOUT_REALTIME:
		return CLOCK_REALTIME;
	default:
		/* can't happen, vetted at prep time */
		WARN_ON_ONCE(1);
		fallthrough;
	case 0:
		return CLOCK_MONOTONIC;
	}
}

static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
				    struct timespec64 *ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout_data *io;
	struct io_timeout *timeout;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->ltimeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (user_data == tmp->cqe.user_data) {
			req = tmp;
			break;
		}
	}
	if (!req)
		return -ENOENT;

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return -EALREADY;
	hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
	io->timer.function = io_link_timeout_fn;
	hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
	return 0;
}

static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
			     struct timespec64 *ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_cancel_data cd = { .ctx = ctx, .data = user_data, };
	struct io_kiocb *req = io_timeout_extract(ctx, &cd);
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data;

	if (IS_ERR(req))
		return PTR_ERR(req);

	timeout->off = 0; /* noseq */
	data = req->async_data;
	data->ts = *ts;

	list_add_tail(&timeout->list, &ctx->timeout_list);
	hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), mode);
	return 0;
}

int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
		return -EINVAL;

	tr->ltimeout = false;
	tr->addr = READ_ONCE(sqe->addr);
	tr->flags = READ_ONCE(sqe->timeout_flags);
	if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
		if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
			return -EINVAL;
		if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
			tr->ltimeout = true;
		if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
			return -EINVAL;
		if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
			return -EFAULT;
		if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0)
			return -EINVAL;
	} else if (tr->flags) {
		/* timeout removal doesn't support flags */
		return -EINVAL;
	}

	return 0;
}
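
/*
 * Usage sketch (illustrative only, kept out of the build): re-arming an
 * existing timeout with IORING_TIMEOUT_UPDATE, which is routed through
 * io_timeout_update() above. Assumes liburing's io_uring_prep_timeout() and
 * io_uring_prep_timeout_update() helpers; durations and user_data values
 * are arbitrary examples.
 */
#if 0
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct __kernel_timespec slow = { .tv_sec = 60 };
	struct __kernel_timespec fast = { .tv_nsec = 1000000 };	/* 1ms */
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int i;

	if (io_uring_queue_init(8, &ring, 0))
		return 1;

	/* arm a 60s timeout under user_data 1 */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_timeout(sqe, &slow, 0, 0);
	sqe->user_data = 1;

	/* shrink it to 1ms so it fires almost immediately */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_timeout_update(sqe, &fast, 1, 0);
	sqe->user_data = 2;

	io_uring_submit(&ring);

	/* expect res=0 for the update, then -ETIME for the timeout */
	for (i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		printf("user_data=%llu res=%d\n",
		       (unsigned long long)cqe->user_data, cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}
#endif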

static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
{
	return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
					    : HRTIMER_MODE_REL;
}

/*
 * Remove or update an existing timeout command
 */
int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	if (!(tr->flags & IORING_TIMEOUT_UPDATE)) {
		struct io_cancel_data cd = { .ctx = ctx, .data = tr->addr, };

		spin_lock(&ctx->completion_lock);
		ret = io_timeout_cancel(ctx, &cd);
		spin_unlock(&ctx->completion_lock);
	} else {
		enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);

		raw_spin_lock_irq(&ctx->timeout_lock);
		if (tr->ltimeout)
			ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
		else
			ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
		raw_spin_unlock_irq(&ctx->timeout_lock);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static int __io_timeout_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe,
			     bool is_timeout_link)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data;
	unsigned flags;
	u32 off = READ_ONCE(sqe->off);

	if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
		return -EINVAL;
	if (off && is_timeout_link)
		return -EINVAL;
	flags = READ_ONCE(sqe->timeout_flags);
	if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
		      IORING_TIMEOUT_ETIME_SUCCESS |
		      IORING_TIMEOUT_MULTISHOT))
		return -EINVAL;
	/* more than one clock specified is invalid, obviously */
	if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
		return -EINVAL;
	/* multishot requests only make sense with rel values */
	if (!(~flags & (IORING_TIMEOUT_MULTISHOT | IORING_TIMEOUT_ABS)))
		return -EINVAL;

	INIT_LIST_HEAD(&timeout->list);
	timeout->off = off;
	if (unlikely(off && !req->ctx->off_timeout_used))
		req->ctx->off_timeout_used = true;
	/*
	 * for multishot reqs w/ fixed nr of repeats, repeats tracks the
	 * remaining nr
	 */
	timeout->repeats = 0;
	if ((flags & IORING_TIMEOUT_MULTISHOT) && off > 0)
		timeout->repeats = off;

	if (WARN_ON_ONCE(req_has_async_data(req)))
		return -EFAULT;
	if (io_alloc_async_data(req))
		return -ENOMEM;

	data = req->async_data;
	data->req = req;
	data->flags = flags;

	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
		return -EFAULT;

	if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
		return -EINVAL;

	data->mode = io_translate_timeout_mode(flags);
	hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);

	if (is_timeout_link) {
		struct io_submit_link *link = &req->ctx->submit_state.link;

		if (!link->head)
			return -EINVAL;
		if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
			return -EINVAL;
		timeout->head = link->last;
		link->last->flags |= REQ_F_ARM_LTIMEOUT;
	}
	return 0;
}

int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, false);
}

int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, true);
}
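
/*
 * Usage sketch (illustrative only, kept out of the build): a linked timeout
 * guarding a blocking request, as prepped by io_link_timeout_prep() above.
 * Assumes liburing's io_uring_prep_read() and io_uring_prep_link_timeout()
 * helpers; the pipe and the 2s value are arbitrary examples.
 */
#if 0
#include <liburing.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct __kernel_timespec ts = { .tv_sec = 2 };
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char buf[64];
	int fds[2], i;

	if (pipe(fds) || io_uring_queue_init(8, &ring, 0))
		return 1;

	/* a read that will never complete on its own ... */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fds[0], buf, sizeof(buf), 0);
	sqe->flags |= IOSQE_IO_LINK;
	sqe->user_data = 1;

	/* ... guarded by a 2s linked timeout */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_link_timeout(sqe, &ts, 0);
	sqe->user_data = 2;

	io_uring_submit(&ring);

	/* if the timer wins, the read typically sees -ECANCELED and the
	 * linked timeout completes with -ETIME */
	for (i = 0; i < 2; i++) {
		if (io_uring_wait_cqe(&ring, &cqe))
			break;
		printf("user_data=%llu res=%d\n",
		       (unsigned long long)cqe->user_data, cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}
#endif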

int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_timeout_data *data = req->async_data;
	struct list_head *entry;
	u32 tail, off = timeout->off;

	raw_spin_lock_irq(&ctx->timeout_lock);

	/*
	 * sqe->off holds how many events need to occur for this timeout
	 * event to be satisfied. If it isn't set, then this is a pure
	 * timeout request, sequence isn't used.
	 */
	if (io_is_timeout_noseq(req)) {
		entry = ctx->timeout_list.prev;
		goto add;
	}

	tail = data_race(ctx->cached_cq_tail) - atomic_read(&ctx->cq_timeouts);
	timeout->target_seq = tail + off;

	/* Update the last seq here in case io_flush_timeouts() hasn't.
	 * This is safe because ->completion_lock is held, and submissions
	 * and completions are never mixed in the same ->completion_lock section.
	 */
	ctx->cq_last_tm_flush = tail;

	/*
	 * Insertion sort, ensuring the first entry in the list is always
	 * the one we need first.
	 */
	list_for_each_prev(entry, &ctx->timeout_list) {
		struct io_timeout *nextt = list_entry(entry, struct io_timeout, list);
		struct io_kiocb *nxt = cmd_to_io_kiocb(nextt);

		if (io_is_timeout_noseq(nxt))
			continue;
		/* nxt.seq is behind @tail, otherwise would've been completed */
		if (off >= nextt->target_seq - tail)
			break;
	}
add:
	list_add(&timeout->list, entry);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
	raw_spin_unlock_irq(&ctx->timeout_lock);
	return IOU_ISSUE_SKIP_COMPLETE;
}

void io_queue_linked_timeout(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;

	raw_spin_lock_irq(&ctx->timeout_lock);
	/*
	 * If the back reference is NULL, then our linked request finished
	 * before we got a chance to setup the timer
	 */
	if (timeout->head) {
		struct io_timeout_data *data = req->async_data;

		data->timer.function = io_link_timeout_fn;
		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
				data->mode);
		list_add_tail(&timeout->list, &ctx->ltimeout_list);
	}
	raw_spin_unlock_irq(&ctx->timeout_lock);
	/* drop submission reference */
	io_put_req(req);
}

static bool io_match_task(struct io_kiocb *head, struct io_uring_task *tctx,
			  bool cancel_all)
	__must_hold(&head->ctx->timeout_lock)
{
	struct io_kiocb *req;

	if (tctx && head->tctx != tctx)
		return false;
	if (cancel_all)
		return true;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

/* Returns true if we found and killed one or more timeouts */
__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
			     bool cancel_all)
{
	struct io_timeout *timeout, *tmp;
	LIST_HEAD(list);

	/*
	 * completion_lock is needed for io_match_task(). Take it before
	 * timeout_lock to keep the locking order consistent.
	 */
	spin_lock(&ctx->completion_lock);
	raw_spin_lock_irq(&ctx->timeout_lock);
	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);

		if (io_match_task(req, tctx, cancel_all))
			io_kill_timeout(req, &list);
	}
	raw_spin_unlock_irq(&ctx->timeout_lock);
	spin_unlock(&ctx->completion_lock);

	return io_flush_killed_timeouts(&list, -ECANCELED);
}
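
/*
 * Usage sketch (illustrative only, kept out of the build): a sequence-offset
 * timeout, i.e. the target_seq path in io_timeout() and io_flush_timeouts()
 * above. With count=N the request completes with res=0 once N CQEs have been
 * posted, or with -ETIME if the timer fires first. Assumes liburing's
 * io_uring_prep_timeout() helper; the 500ms value is an arbitrary example.
 */
#if 0
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct __kernel_timespec ts = { .tv_nsec = 500 * 1000 * 1000 };	/* 500ms */
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;

	if (io_uring_queue_init(8, &ring, 0))
		return 1;

	/* complete early after 4 CQEs, or after 500ms with -ETIME */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_timeout(sqe, &ts, 4, 0);
	sqe->user_data = 7;

	io_uring_submit(&ring);
	if (!io_uring_wait_cqe(&ring, &cqe)) {
		/* nothing else is in flight here, so this prints -ETIME */
		printf("res=%d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return 0;
}
#endif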