// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "cancel.h"
#include "timeout.h"

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	u32				repeats;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
	/* for linked completions */
	struct io_kiocb			*prev;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
	bool				ltimeout;
};

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;

	return !timeout->off || data->flags & IORING_TIMEOUT_MULTISHOT;
}

static inline void io_put_req(struct io_kiocb *req)
{
	if (req_ref_put_and_test(req)) {
		io_queue_next(req);
		io_free_req(req);
	}
}

static inline bool io_timeout_finish(struct io_timeout *timeout,
				     struct io_timeout_data *data)
{
	if (!(data->flags & IORING_TIMEOUT_MULTISHOT))
		return true;

	if (!timeout->off || (timeout->repeats && --timeout->repeats))
		return false;

	return true;
}

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer);

static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;

	if (!io_timeout_finish(timeout, data)) {
		if (io_req_post_cqe(req, -ETIME, IORING_CQE_F_MORE)) {
			/* re-arm timer */
			spin_lock_irq(&ctx->timeout_lock);
			list_add(&timeout->list, ctx->timeout_list.prev);
			hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
			spin_unlock_irq(&ctx->timeout_lock);
			return;
		}
	}

	io_req_task_complete(req, ts);
}

static bool io_kill_timeout(struct io_kiocb *req, int status)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = req->async_data;

	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);

		if (status)
			req_set_fail(req);
		atomic_set(&req->ctx->cq_timeouts,
			   atomic_read(&req->ctx->cq_timeouts) + 1);
		list_del_init(&timeout->list);
		io_req_queue_tw_complete(req, status);
		return true;
	}
	return false;
}

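/*
 * Complete every sequence-based timeout on ->timeout_list whose target CQ
 * tail has been reached, stopping at the first entry that is not yet due
 * (or that carries no sequence at all).
 */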
__cold void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	u32 seq;
	struct io_timeout *timeout, *tmp;

	spin_lock_irq(&ctx->timeout_lock);
	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);

	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);
		u32 events_needed, events_got;

		if (io_is_timeout_noseq(req))
			break;

		/*
		 * Since seq can easily wrap around over time, subtract
		 * the last seq at which timeouts were flushed before comparing.
		 * Assuming not more than 2^31-1 events have happened since,
		 * these subtractions won't have wrapped, so we can check if
		 * target is in [last_seq, current_seq] by comparing the two.
		 */
		events_needed = timeout->target_seq - ctx->cq_last_tm_flush;
		events_got = seq - ctx->cq_last_tm_flush;
		if (events_got < events_needed)
			break;

		io_kill_timeout(req, 0);
	}
	ctx->cq_last_tm_flush = seq;
	spin_unlock_irq(&ctx->timeout_lock);
}

static void io_req_tw_fail_links(struct io_kiocb *link, struct io_tw_state *ts)
{
	io_tw_lock(link->ctx, ts);
	while (link) {
		struct io_kiocb *nxt = link->link;
		long res = -ECANCELED;

		if (link->flags & REQ_F_FAIL)
			res = link->cqe.res;
		link->link = NULL;
		io_req_set_res(link, res, 0);
		io_req_task_complete(link, ts);
		link = nxt;
	}
}

static void io_fail_links(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = req->link;
	bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;

	if (!link)
		return;

	while (link) {
		if (ignore_cqes)
			link->flags |= REQ_F_CQE_SKIP;
		else
			link->flags &= ~REQ_F_CQE_SKIP;
		trace_io_uring_fail_link(req, link);
		link = link->link;
	}

	link = req->link;
	link->io_task_work.func = io_req_tw_fail_links;
	io_req_task_work_add(link);
	req->link = NULL;
}

static inline void io_remove_next_linked(struct io_kiocb *req)
{
	struct io_kiocb *nxt = req->link;

	req->link = nxt->link;
	nxt->link = NULL;
}

void io_disarm_next(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = NULL;

	if (req->flags & REQ_F_ARM_LTIMEOUT) {
		link = req->link;
		req->flags &= ~REQ_F_ARM_LTIMEOUT;
		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
			io_remove_next_linked(req);
			io_req_queue_tw_complete(link, -ECANCELED);
		}
	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock_irq(&ctx->timeout_lock);
		link = io_disarm_linked_timeout(req);
		spin_unlock_irq(&ctx->timeout_lock);
		if (link)
			io_req_queue_tw_complete(link, -ECANCELED);
	}
	if (unlikely((req->flags & REQ_F_FAIL) &&
		     !(req->flags & REQ_F_HARDLINK)))
		io_fail_links(req);
}

struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
					    struct io_kiocb *link)
	__must_hold(&req->ctx->completion_lock)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = link->async_data;
	struct io_timeout *timeout = io_kiocb_to_cmd(link, struct io_timeout);

	io_remove_next_linked(req);
	timeout->head = NULL;
	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		list_del(&timeout->list);
		return link;
	}

	return NULL;
}

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->timeout_lock, flags);
	list_del_init(&timeout->list);
	atomic_set(&req->ctx->cq_timeouts,
		   atomic_read(&req->ctx->cq_timeouts) + 1);
	spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
		req_set_fail(req);

	io_req_set_res(req, -ETIME, 0);
	req->io_task_work.func = io_timeout_complete;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}

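/*
 * Find the first queued timeout matching @cd and try to stop its timer.
 * On success the request is removed from ->timeout_list and returned with
 * its hrtimer cancelled; otherwise this returns ERR_PTR(-ENOENT) if no
 * match was found, or ERR_PTR(-EALREADY) if the timer is already firing.
 */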
static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
					   struct io_cancel_data *cd)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout *timeout;
	struct io_timeout_data *io;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->timeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (io_cancel_req_match(tmp, cd)) {
			req = tmp;
			break;
		}
	}
	if (!req)
		return ERR_PTR(-ENOENT);

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return ERR_PTR(-EALREADY);
	timeout = io_kiocb_to_cmd(req, struct io_timeout);
	list_del_init(&timeout->list);
	return req;
}

int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
	__must_hold(&ctx->completion_lock)
{
	struct io_kiocb *req;

	spin_lock_irq(&ctx->timeout_lock);
	req = io_timeout_extract(ctx, cd);
	spin_unlock_irq(&ctx->timeout_lock);

	if (IS_ERR(req))
		return PTR_ERR(req);
	io_req_task_queue_fail(req, -ECANCELED);
	return 0;
}

static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_kiocb *prev = timeout->prev;
	int ret;

	if (prev) {
		if (!io_should_terminate_tw()) {
			struct io_cancel_data cd = {
				.ctx		= req->ctx,
				.data		= prev->cqe.user_data,
			};

			ret = io_try_cancel(req->tctx, &cd, 0);
		} else {
			ret = -ECANCELED;
		}
		io_req_set_res(req, ret ?: -ETIME, 0);
		io_req_task_complete(req, ts);
		io_put_req(prev);
	} else {
		io_req_set_res(req, -ETIME, 0);
		io_req_task_complete(req, ts);
	}
}

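/*
 * hrtimer callback for a linked timeout: grab a reference to the request
 * it guards (if that request hasn't completed yet) and punt the actual
 * cancellation to task work via io_req_task_link_timeout().
 */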
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *prev, *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->timeout_lock, flags);
	prev = timeout->head;
	timeout->head = NULL;

	/*
	 * We don't expect the list to be empty, that will only happen if we
	 * race with the completion of the linked work.
	 */
	if (prev) {
		io_remove_next_linked(prev);
		if (!req_ref_inc_not_zero(prev))
			prev = NULL;
	}
	list_del(&timeout->list);
	timeout->prev = prev;
	spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	req->io_task_work.func = io_req_task_link_timeout;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}

static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
{
	switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
	case IORING_TIMEOUT_BOOTTIME:
		return CLOCK_BOOTTIME;
	case IORING_TIMEOUT_REALTIME:
		return CLOCK_REALTIME;
	default:
		/* can't happen, vetted at prep time */
		WARN_ON_ONCE(1);
		fallthrough;
	case 0:
		return CLOCK_MONOTONIC;
	}
}

static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
				    struct timespec64 *ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout_data *io;
	struct io_timeout *timeout;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->ltimeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (user_data == tmp->cqe.user_data) {
			req = tmp;
			break;
		}
	}
	if (!req)
		return -ENOENT;

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return -EALREADY;
	hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
	io->timer.function = io_link_timeout_fn;
	hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
	return 0;
}

static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
			     struct timespec64 *ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_cancel_data cd = { .ctx = ctx, .data = user_data, };
	struct io_kiocb *req = io_timeout_extract(ctx, &cd);
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data;

	if (IS_ERR(req))
		return PTR_ERR(req);

	timeout->off = 0; /* noseq */
	data = req->async_data;
	list_add_tail(&timeout->list, &ctx->timeout_list);
	hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
	return 0;
}

int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
		return -EINVAL;

	tr->ltimeout = false;
	tr->addr = READ_ONCE(sqe->addr);
	tr->flags = READ_ONCE(sqe->timeout_flags);
	if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
		if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
			return -EINVAL;
		if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
			tr->ltimeout = true;
		if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
			return -EINVAL;
		if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
			return -EFAULT;
		if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0)
			return -EINVAL;
	} else if (tr->flags) {
		/* timeout removal doesn't support flags */
		return -EINVAL;
	}

	return 0;
}

static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
{
	return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
					    : HRTIMER_MODE_REL;
}

/*
 * Remove or update an existing timeout command
 */
int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	if (!(tr->flags & IORING_TIMEOUT_UPDATE)) {
		struct io_cancel_data cd = { .ctx = ctx, .data = tr->addr, };

		spin_lock(&ctx->completion_lock);
		ret = io_timeout_cancel(ctx, &cd);
		spin_unlock(&ctx->completion_lock);
	} else {
		enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);

		spin_lock_irq(&ctx->timeout_lock);
		if (tr->ltimeout)
			ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
		else
			ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
		spin_unlock_irq(&ctx->timeout_lock);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static int __io_timeout_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe,
			     bool is_timeout_link)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data;
	unsigned flags;
	u32 off = READ_ONCE(sqe->off);

	if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
		return -EINVAL;
	if (off && is_timeout_link)
		return -EINVAL;
	flags = READ_ONCE(sqe->timeout_flags);
	if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
		      IORING_TIMEOUT_ETIME_SUCCESS |
		      IORING_TIMEOUT_MULTISHOT))
		return -EINVAL;
	/* more than one clock specified is invalid, obviously */
	if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
		return -EINVAL;
	/* multishot requests only make sense with rel values */
	if (!(~flags & (IORING_TIMEOUT_MULTISHOT | IORING_TIMEOUT_ABS)))
		return -EINVAL;

	INIT_LIST_HEAD(&timeout->list);
	timeout->off = off;
	if (unlikely(off && !req->ctx->off_timeout_used))
		req->ctx->off_timeout_used = true;
	/*
	 * for multishot reqs w/ fixed nr of repeats, repeats tracks the
	 * remaining nr
	 */
	timeout->repeats = 0;
	if ((flags & IORING_TIMEOUT_MULTISHOT) && off > 0)
		timeout->repeats = off;

	if (WARN_ON_ONCE(req_has_async_data(req)))
		return -EFAULT;
	if (io_alloc_async_data(req))
		return -ENOMEM;

	data = req->async_data;
	data->req = req;
	data->flags = flags;

	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
		return -EFAULT;

	if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
		return -EINVAL;

	data->mode = io_translate_timeout_mode(flags);
	hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);

	if (is_timeout_link) {
		struct io_submit_link *link = &req->ctx->submit_state.link;

		if (!link->head)
			return -EINVAL;
		if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
			return -EINVAL;
		timeout->head = link->last;
		link->last->flags |= REQ_F_ARM_LTIMEOUT;
	}
	return 0;
}

int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, false);
}

int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, true);
}

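/*
 * Arm a timeout request. Pure timeouts (no completion-count trigger) go to
 * the tail of ->timeout_list; sequence timeouts are insertion-sorted so the
 * entry needing the fewest further completions sits at the front.
 */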
int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_timeout_data *data = req->async_data;
	struct list_head *entry;
	u32 tail, off = timeout->off;

	spin_lock_irq(&ctx->timeout_lock);

	/*
	 * sqe->off holds how many events that need to occur for this
	 * timeout event to be satisfied. If it isn't set, then this is
	 * a pure timeout request, sequence isn't used.
	 */
	if (io_is_timeout_noseq(req)) {
		entry = ctx->timeout_list.prev;
		goto add;
	}

	tail = data_race(ctx->cached_cq_tail) - atomic_read(&ctx->cq_timeouts);
	timeout->target_seq = tail + off;

	/* Update the last seq here in case io_flush_timeouts() hasn't.
	 * This is safe because ->completion_lock is held, and submissions
	 * and completions are never mixed in the same ->completion_lock section.
	 */
	ctx->cq_last_tm_flush = tail;

	/*
	 * Insertion sort, ensuring the first entry in the list is always
	 * the one we need first.
	 */
	list_for_each_prev(entry, &ctx->timeout_list) {
		struct io_timeout *nextt = list_entry(entry, struct io_timeout, list);
		struct io_kiocb *nxt = cmd_to_io_kiocb(nextt);

		if (io_is_timeout_noseq(nxt))
			continue;
		/* nxt.seq is behind @tail, otherwise would've been completed */
		if (off >= nextt->target_seq - tail)
			break;
	}
add:
	list_add(&timeout->list, entry);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
	spin_unlock_irq(&ctx->timeout_lock);
	return IOU_ISSUE_SKIP_COMPLETE;
}

void io_queue_linked_timeout(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock_irq(&ctx->timeout_lock);
	/*
	 * If the back reference is NULL, then our linked request finished
	 * before we got a chance to set up the timer
	 */
	if (timeout->head) {
		struct io_timeout_data *data = req->async_data;

		data->timer.function = io_link_timeout_fn;
		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
			      data->mode);
		list_add_tail(&timeout->list, &ctx->ltimeout_list);
	}
	spin_unlock_irq(&ctx->timeout_lock);
	/* drop submission reference */
	io_put_req(req);
}

static bool io_match_task(struct io_kiocb *head, struct io_uring_task *tctx,
			  bool cancel_all)
	__must_hold(&head->ctx->timeout_lock)
{
	struct io_kiocb *req;

	if (tctx && head->tctx != tctx)
		return false;
	if (cancel_all)
		return true;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

/* Returns true if we found and killed one or more timeouts */
__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
			     bool cancel_all)
{
	struct io_timeout *timeout, *tmp;
	int canceled = 0;

	/*
	 * completion_lock is needed for io_match_task(). Take it before
	 * timeout_lock to keep the locking order.
	 */
	spin_lock(&ctx->completion_lock);
	spin_lock_irq(&ctx->timeout_lock);
	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);

		if (io_match_task(req, tctx, cancel_all) &&
		    io_kill_timeout(req, -ECANCELED))
			canceled++;
	}
	spin_unlock_irq(&ctx->timeout_lock);
	spin_unlock(&ctx->completion_lock);
	return canceled != 0;
}