// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>
#include <linux/time_namespace.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "cancel.h"
#include "timeout.h"

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	u32				repeats;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
	/* for linked completions */
	struct io_kiocb			*prev;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	ktime_t				time;
	u32				flags;
	bool				ltimeout;
};

static clockid_t io_flags_to_clock(unsigned flags)
{
	switch (flags & IORING_TIMEOUT_CLOCK_MASK) {
	case IORING_TIMEOUT_BOOTTIME:
		return CLOCK_BOOTTIME;
	case IORING_TIMEOUT_REALTIME:
		return CLOCK_REALTIME;
	default:
		/* can't happen, vetted at prep time */
		WARN_ON_ONCE(1);
		fallthrough;
	case 0:
		return CLOCK_MONOTONIC;
	}
}

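/*
 * Extract the timeout value from the SQE: either an inline nanosecond
 * count (IORING_TIMEOUT_IMMEDIATE_ARG), or a struct timespec64 read from
 * the userspace pointer in @arg. Absolute times are converted from the
 * task's time namespace to host time.
 */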
static int io_parse_user_time(ktime_t *time, u64 arg, unsigned flags)
{
	struct timespec64 ts;

	if (flags & IORING_TIMEOUT_IMMEDIATE_ARG) {
		*time = ns_to_ktime(arg);
		if (*time < 0)
			return -EINVAL;
		goto out;
	}

	if (get_timespec64(&ts, u64_to_user_ptr(arg)))
		return -EFAULT;
	if (ts.tv_sec < 0 || ts.tv_nsec < 0)
		return -EINVAL;
	*time = timespec64_to_ktime(ts);
out:
	if (flags & IORING_TIMEOUT_ABS)
		*time = timens_ktime_to_host(io_flags_to_clock(flags), *time);
	return 0;
}

static struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
						   struct io_kiocb *link);

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;

	return !timeout->off || data->flags & IORING_TIMEOUT_MULTISHOT;
}

static inline void io_put_req(struct io_kiocb *req)
{
	if (req_ref_put_and_test(req)) {
		io_queue_next(req);
		io_free_req(req);
	}
}

static inline bool io_timeout_finish(struct io_timeout *timeout,
				     struct io_timeout_data *data)
{
	if (!(data->flags & IORING_TIMEOUT_MULTISHOT))
		return true;

	if (!timeout->off || (timeout->repeats && --timeout->repeats))
		return false;

	return true;
}

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer);

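/*
 * Task-work completion for a fired timeout. A multishot timeout that
 * isn't finished posts a CQE with IORING_CQE_F_MORE and re-arms its
 * timer instead of completing the request.
 */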
static void io_timeout_complete(struct io_tw_req tw_req, io_tw_token_t tw)
{
	struct io_kiocb *req = tw_req.req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;

	if (!io_timeout_finish(timeout, data)) {
		if (io_req_post_cqe(req, -ETIME, IORING_CQE_F_MORE)) {
			/* re-arm timer */
			raw_spin_lock_irq(&ctx->timeout_lock);
			list_add(&timeout->list, ctx->timeout_list.prev);
			hrtimer_start(&data->timer, data->time, data->mode);
			raw_spin_unlock_irq(&ctx->timeout_lock);
			return;
		}
	}

	io_req_task_complete(tw_req, tw);
}

static __cold bool io_flush_killed_timeouts(struct list_head *list, int err)
{
	if (list_empty(list))
		return false;

	while (!list_empty(list)) {
		struct io_timeout *timeout;
		struct io_kiocb *req;

		timeout = list_first_entry(list, struct io_timeout, list);
		list_del_init(&timeout->list);
		req = cmd_to_io_kiocb(timeout);
		if (err)
			req_set_fail(req);
		io_req_queue_tw_complete(req, err);
	}

	return true;
}

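/*
 * Try to cancel the timer for @req: on success, bump the CQ timeout
 * accounting and move the timeout to the caller's kill list. If the
 * timer callback is already running, leave the request alone.
 */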
static void io_kill_timeout(struct io_kiocb *req, struct list_head *list)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = req->async_data;

	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);

		atomic_set(&req->ctx->cq_timeouts,
			atomic_read(&req->ctx->cq_timeouts) + 1);
		list_move_tail(&timeout->list, list);
	}
}

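/*
 * Called when CQEs have been posted: kill every sequenced timeout whose
 * target CQ sequence has been reached. The list is kept sorted, so we
 * can stop at the first entry that still has events outstanding.
 */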
__cold void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	struct io_timeout *timeout, *tmp;
	LIST_HEAD(list);
	u32 seq;

	raw_spin_lock_irq(&ctx->timeout_lock);
	seq = READ_ONCE(ctx->cached_cq_tail) - atomic_read(&ctx->cq_timeouts);

	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);
		u32 events_needed, events_got;

		if (io_is_timeout_noseq(req))
			break;

		/*
		 * Since seq can easily wrap around over time, subtract
		 * the last seq at which timeouts were flushed before comparing.
		 * Assuming not more than 2^31-1 events have happened since,
		 * these subtractions won't have wrapped, so we can check if
		 * target is in [last_seq, current_seq] by comparing the two.
		 */
		events_needed = timeout->target_seq - ctx->cq_last_tm_flush;
		events_got = seq - ctx->cq_last_tm_flush;
		if (events_got < events_needed)
			break;

		io_kill_timeout(req, &list);
	}
	ctx->cq_last_tm_flush = seq;
	raw_spin_unlock_irq(&ctx->timeout_lock);
	io_flush_killed_timeouts(&list, 0);
}

static void io_req_tw_fail_links(struct io_tw_req tw_req, io_tw_token_t tw)
{
	struct io_kiocb *link = tw_req.req;

	io_tw_lock(link->ctx, tw);
	while (link) {
		struct io_kiocb *nxt = link->link;
		long res = -ECANCELED;

		if (link->flags & REQ_F_FAIL)
			res = link->cqe.res;
		link->link = NULL;
		io_req_set_res(link, res, 0);
		io_req_task_complete((struct io_tw_req){link}, tw);
		link = nxt;
	}
}

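/*
 * Fail every request linked to @req, propagating the head's CQE-skip
 * state to each link, and punt the actual completions to task work.
 */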
static void io_fail_links(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = req->link;
	bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;

	if (!link)
		return;

	while (link) {
		if (ignore_cqes)
			link->flags |= REQ_F_CQE_SKIP;
		else
			link->flags &= ~REQ_F_CQE_SKIP;
		trace_io_uring_fail_link(req, link);
		link = link->link;
	}

	link = req->link;
	link->io_task_work.func = io_req_tw_fail_links;
	io_req_task_work_add(link);
	req->link = NULL;
}

static inline void io_remove_next_linked(struct io_kiocb *req)
{
	struct io_kiocb *nxt = req->link;

	req->link = nxt->link;
	nxt->link = NULL;
}

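/*
 * Disarm a linked timeout attached to @req: either one that was queued
 * behind it but never armed (REQ_F_ARM_LTIMEOUT), or one whose timer is
 * already running (REQ_F_LINK_TIMEOUT). If @req failed, fail the rest
 * of its link chain as well.
 */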
void io_disarm_next(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = NULL;

	if (req->flags & REQ_F_ARM_LTIMEOUT) {
		link = req->link;
		req->flags &= ~REQ_F_ARM_LTIMEOUT;
		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
			io_remove_next_linked(req);
			io_req_queue_tw_complete(link, -ECANCELED);
		}
	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;

		raw_spin_lock_irq(&ctx->timeout_lock);
		if (req->link && req->link->opcode == IORING_OP_LINK_TIMEOUT)
			link = __io_disarm_linked_timeout(req, req->link);

		raw_spin_unlock_irq(&ctx->timeout_lock);
		if (link)
			io_req_queue_tw_complete(link, -ECANCELED);
	}
	if (unlikely((req->flags & REQ_F_FAIL) &&
		     !(req->flags & REQ_F_HARDLINK)))
		io_fail_links(req);
}

static struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
						   struct io_kiocb *link)
	__must_hold(&req->ctx->completion_lock)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = link->async_data;
	struct io_timeout *timeout = io_kiocb_to_cmd(link, struct io_timeout);

	io_remove_next_linked(req);
	timeout->head = NULL;
	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		list_del(&timeout->list);
		return link;
	}

	return NULL;
}

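/*
 * hrtimer callback for a regular timeout, runs in IRQ context: unhook
 * the timeout, bump the ctx->cq_timeouts accounting, and punt the
 * actual -ETIME completion to task work.
 */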
static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->timeout_lock, flags);
	list_del_init(&timeout->list);
	atomic_set(&ctx->cq_timeouts,
		atomic_read(&ctx->cq_timeouts) + 1);
	raw_spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
		req_set_fail(req);

	io_req_set_res(req, -ETIME, 0);
	req->io_task_work.func = io_timeout_complete;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}

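/*
 * Find an armed timeout matching @cd and take it off the timeout list.
 * Returns -ENOENT if nothing matched, or -EALREADY if the timer is
 * already firing and can no longer be cancelled.
 */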
static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
					   struct io_cancel_data *cd)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout *timeout;
	struct io_timeout_data *io;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->timeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (io_cancel_req_match(tmp, cd)) {
			req = tmp;
			break;
		}
	}
	if (!req)
		return ERR_PTR(-ENOENT);

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return ERR_PTR(-EALREADY);
	timeout = io_kiocb_to_cmd(req, struct io_timeout);
	list_del_init(&timeout->list);
	return req;
}

int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
	__must_hold(&ctx->completion_lock)
{
	struct io_kiocb *req;

	raw_spin_lock_irq(&ctx->timeout_lock);
	req = io_timeout_extract(ctx, cd);
	raw_spin_unlock_irq(&ctx->timeout_lock);

	if (IS_ERR(req))
		return PTR_ERR(req);
	io_req_task_queue_fail(req, -ECANCELED);
	return 0;
}

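/*
 * Task-work side of a fired linked timeout: try to cancel the request
 * the timeout was guarding, if it is still around, then complete the
 * timeout itself with the cancellation result or -ETIME.
 */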
static void io_req_task_link_timeout(struct io_tw_req tw_req, io_tw_token_t tw)
{
	struct io_kiocb *req = tw_req.req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_kiocb *prev = timeout->prev;
	int ret;

	if (prev) {
		if (!tw.cancel) {
			struct io_cancel_data cd = {
				.ctx = req->ctx,
				.data = prev->cqe.user_data,
			};

			ret = io_try_cancel(req->tctx, &cd, 0);
		} else {
			ret = -ECANCELED;
		}
		io_req_set_res(req, ret ?: -ETIME, 0);
		io_req_task_complete(tw_req, tw);
		io_put_req(prev);
	} else {
		io_req_set_res(req, -ETIME, 0);
		io_req_task_complete(tw_req, tw);
	}
}

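/*
 * hrtimer callback for a linked timeout, runs in IRQ context: detach
 * the request this timeout guards (taking a reference so it can't go
 * away underneath us) and punt the cancellation attempt to task work.
 */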
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *prev, *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->timeout_lock, flags);
	prev = timeout->head;
	timeout->head = NULL;

	/*
	 * We don't expect the list to be empty, that will only happen if we
	 * race with the completion of the linked work.
	 */
	if (prev) {
		io_remove_next_linked(prev);
		if (!req_ref_inc_not_zero(prev))
			prev = NULL;
	}
	list_del(&timeout->list);
	timeout->prev = prev;
	raw_spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	req->io_task_work.func = io_req_task_link_timeout;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}

static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
{
	return io_flags_to_clock(data->flags);
}

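/*
 * Update an armed linked timeout identified by @user_data: cancel its
 * timer and re-arm it with the new expiration and mode.
 */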
static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
				    ktime_t ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout_data *io;
	struct io_timeout *timeout;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->ltimeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (user_data == tmp->cqe.user_data) {
			req = tmp;
			break;
		}
	}
	if (!req)
		return -ENOENT;

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return -EALREADY;
	hrtimer_setup(&io->timer, io_link_timeout_fn, io_timeout_get_clock(io), mode);
	hrtimer_start(&io->timer, ts, mode);
	return 0;
}

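/*
 * Update a regular timeout: extract it with its timer cancelled, clear
 * the sequence offset (an updated timeout becomes a pure timer), and
 * re-arm it with the new expiration.
 */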
static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
			     ktime_t time, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_cancel_data cd = { .ctx = ctx, .data = user_data, };
	struct io_kiocb *req = io_timeout_extract(ctx, &cd);
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data;

	if (IS_ERR(req))
		return PTR_ERR(req);

	timeout->off = 0; /* noseq */
	data = req->async_data;
	data->time = time;

	list_add_tail(&timeout->list, &ctx->timeout_list);
	hrtimer_setup(&data->timer, io_timeout_fn, io_timeout_get_clock(data), mode);
	hrtimer_start(&data->timer, data->time, mode);
	return 0;
}

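/*
 * Prep for IORING_OP_TIMEOUT_REMOVE: validate the SQE and, for the
 * update variants, parse the new timeout value.
 */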
int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
	int ret;

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->addr3 || sqe->__pad2[0])
		return -EINVAL;
	if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
		return -EINVAL;

	tr->ltimeout = false;
	tr->addr = READ_ONCE(sqe->addr);
	tr->flags = READ_ONCE(sqe->timeout_flags);
	if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
		if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
			return -EINVAL;
		if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
			tr->ltimeout = true;
		if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK |
				  IORING_TIMEOUT_ABS |
				  IORING_TIMEOUT_IMMEDIATE_ARG))
			return -EINVAL;
		ret = io_parse_user_time(&tr->time, READ_ONCE(sqe->addr2), tr->flags);
		if (ret)
			return ret;
	} else if (tr->flags) {
		/* timeout removal doesn't support flags */
		return -EINVAL;
	}

	return 0;
}

static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
{
	return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
					    : HRTIMER_MODE_REL;
}

/*
 * Remove or update an existing timeout command
 */
int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	if (!(tr->flags & IORING_TIMEOUT_UPDATE)) {
		struct io_cancel_data cd = { .ctx = ctx, .data = tr->addr, };

		spin_lock(&ctx->completion_lock);
		ret = io_timeout_cancel(ctx, &cd);
		spin_unlock(&ctx->completion_lock);
	} else {
		enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);

		raw_spin_lock_irq(&ctx->timeout_lock);
		if (tr->ltimeout)
			ret = io_linked_timeout_update(ctx, tr->addr, tr->time, mode);
		else
			ret = io_timeout_update(ctx, tr->addr, tr->time, mode);
		raw_spin_unlock_irq(&ctx->timeout_lock);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

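/*
 * Common prep for IORING_OP_TIMEOUT and IORING_OP_LINK_TIMEOUT: validate
 * the SQE, allocate and fill the async timeout data, and set up (but do
 * not start) the hrtimer. A linked timeout is attached to the tail of
 * the current submission link chain.
 */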
static int __io_timeout_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe,
			     bool is_timeout_link)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data;
	unsigned flags;
	u32 off = READ_ONCE(sqe->off);
	int ret;

	if (sqe->addr3 || sqe->__pad2[0])
		return -EINVAL;
	if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
		return -EINVAL;
	if (off && is_timeout_link)
		return -EINVAL;
	flags = READ_ONCE(sqe->timeout_flags);
	if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
		      IORING_TIMEOUT_ETIME_SUCCESS |
		      IORING_TIMEOUT_MULTISHOT |
		      IORING_TIMEOUT_IMMEDIATE_ARG))
		return -EINVAL;
	/* more than one clock specified is invalid, obviously */
	if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
		return -EINVAL;
	/* multishot requests only make sense with rel values */
	if (!(~flags & (IORING_TIMEOUT_MULTISHOT | IORING_TIMEOUT_ABS)))
		return -EINVAL;

	INIT_LIST_HEAD(&timeout->list);
	timeout->off = off;
	if (unlikely(off && !(req->ctx->int_flags & IO_RING_F_OFF_TIMEOUT_USED)))
		req->ctx->int_flags |= IO_RING_F_OFF_TIMEOUT_USED;
	/*
	 * for multishot reqs w/ fixed nr of repeats, repeats tracks the
	 * remaining nr
	 */
	timeout->repeats = 0;
	if ((flags & IORING_TIMEOUT_MULTISHOT) && off > 0)
		timeout->repeats = off;

	if (WARN_ON_ONCE(req_has_async_data(req)))
		return -EFAULT;
	data = io_uring_alloc_async_data(NULL, req);
	if (!data)
		return -ENOMEM;
	data->req = req;
	data->flags = flags;

	ret = io_parse_user_time(&data->time, READ_ONCE(sqe->addr), flags);
	if (ret)
		return ret;

	data->mode = io_translate_timeout_mode(flags);

	if (is_timeout_link) {
		struct io_submit_link *link = &req->ctx->submit_state.link;

		if (!link->head)
			return -EINVAL;
		if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
			return -EINVAL;
		timeout->head = link->last;
		link->last->flags |= REQ_F_ARM_LTIMEOUT;
		hrtimer_setup(&data->timer, io_link_timeout_fn, io_timeout_get_clock(data),
			      data->mode);
	} else {
		hrtimer_setup(&data->timer, io_timeout_fn, io_timeout_get_clock(data), data->mode);
	}
	return 0;
}

int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, false);
}

int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, true);
}

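/*
 * Issue path for IORING_OP_TIMEOUT: pure timeouts go to the back of the
 * list, while sequenced ones are insertion-sorted by how many CQEs
 * remain until they fire, so io_flush_timeouts() only needs to inspect
 * the head of the list.
 */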
int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_timeout_data *data = req->async_data;
	struct list_head *entry;
	u32 tail, off = timeout->off;

	raw_spin_lock_irq(&ctx->timeout_lock);

	/*
	 * sqe->off holds how many events need to occur for this
	 * timeout event to be satisfied. If it isn't set, then this is
	 * a pure timeout request, sequence isn't used.
	 */
	if (io_is_timeout_noseq(req)) {
		entry = ctx->timeout_list.prev;
		goto add;
	}

	tail = data_race(ctx->cached_cq_tail) - atomic_read(&ctx->cq_timeouts);
	timeout->target_seq = tail + off;

	/* Update the last seq here in case io_flush_timeouts() hasn't.
	 * This is safe because ->completion_lock is held, and submissions
	 * and completions are never mixed in the same ->completion_lock section.
	 */
	ctx->cq_last_tm_flush = tail;

	/*
	 * Insertion sort, ensuring the first entry in the list is always
	 * the one we need first.
	 */
	list_for_each_prev(entry, &ctx->timeout_list) {
		struct io_timeout *nextt = list_entry(entry, struct io_timeout, list);
		struct io_kiocb *nxt = cmd_to_io_kiocb(nextt);

		if (io_is_timeout_noseq(nxt))
			continue;
		/* nxt.seq is behind @tail, otherwise would've been completed */
		if (off >= nextt->target_seq - tail)
			break;
	}
add:
	list_add(&timeout->list, entry);
	hrtimer_start(&data->timer, data->time, data->mode);
	raw_spin_unlock_irq(&ctx->timeout_lock);
	return IOU_ISSUE_SKIP_COMPLETE;
}

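/*
 * Arm a prepared linked timeout: start its timer unless the request it
 * guards already completed, then drop the submission reference.
 */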
void io_queue_linked_timeout(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;

	raw_spin_lock_irq(&ctx->timeout_lock);
	/*
	 * If the back reference is NULL, then our linked request finished
	 * before we got a chance to set up the timer
	 */
	if (timeout->head) {
		struct io_timeout_data *data = req->async_data;

		hrtimer_start(&data->timer, data->time, data->mode);
		list_add_tail(&timeout->list, &ctx->ltimeout_list);
	}
	raw_spin_unlock_irq(&ctx->timeout_lock);
	/* drop submission reference */
	io_put_req(req);
}

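/*
 * Check whether @head belongs to the task identified by @tctx and should
 * be cancelled: with @cancel_all any matching request qualifies,
 * otherwise only if a request in its link chain is marked in-flight.
 */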
static bool io_match_task(struct io_kiocb *head, struct io_uring_task *tctx,
			  bool cancel_all)
	__must_hold(&head->ctx->timeout_lock)
{
	struct io_kiocb *req;

	if (tctx && head->tctx != tctx)
		return false;
	if (cancel_all)
		return true;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

/* Returns true if we found and killed one or more timeouts */
__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
			     bool cancel_all)
{
	struct io_timeout *timeout, *tmp;
	LIST_HEAD(list);

	/*
	 * completion_lock is needed for io_match_task(). Take it before
	 * timeout_lock to keep the locking order.
	 */
	spin_lock(&ctx->completion_lock);
	raw_spin_lock_irq(&ctx->timeout_lock);
	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);

		if (io_match_task(req, tctx, cancel_all))
			io_kill_timeout(req, &list);
	}
	raw_spin_unlock_irq(&ctx->timeout_lock);
	spin_unlock(&ctx->completion_lock);

	return io_flush_killed_timeouts(&list, -ECANCELED);
}