Lines matching full:req (whole-word matches for "req") in io_uring/timeout.c

38 static struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
41 static inline bool io_is_timeout_noseq(struct io_kiocb *req)
43 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
44 struct io_timeout_data *data = req->async_data;
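Lines 41-44 are the head of io_is_timeout_noseq(); its one-line body contains no whole-word "req" and so is not shown. A sketch of the full helper (the IORING_TIMEOUT_MULTISHOT clause follows recent trees and is an assumption for older ones):

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;

	/* "noseq" timeouts fire purely on time, never on a CQE count */
	return !timeout->off || data->flags & IORING_TIMEOUT_MULTISHOT;
}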
49 static inline void io_put_req(struct io_kiocb *req)
51 if (req_ref_put_and_test(req)) {
52 io_queue_next(req);
53 io_free_req(req);
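Lines 49-53 are effectively the whole of io_put_req(); only the braces are missing from the matches. Reconstructed:

static inline void io_put_req(struct io_kiocb *req)
{
	/* last reference: schedule the next linked request, then free */
	if (req_ref_put_and_test(req)) {
		io_queue_next(req);
		io_free_req(req);
	}
}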
71 static void io_timeout_complete(struct io_kiocb *req, io_tw_token_t tw)
73 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
74 struct io_timeout_data *data = req->async_data;
75 struct io_ring_ctx *ctx = req->ctx;
78 if (io_req_post_cqe(req, -ETIME, IORING_CQE_F_MORE)) {
88 io_req_task_complete(req, tw);
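Read together, these matches show io_timeout_complete() posting an -ETIME CQE and, for a multishot timeout with shots remaining, re-arming the hrtimer instead of completing. A sketch of that shape (the io_timeout_finish() helper and the exact locking are taken from recent trees and are assumptions elsewhere):

static void io_timeout_complete(struct io_kiocb *req, io_tw_token_t tw)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;

	if (!io_timeout_finish(timeout, data)) {
		/* multishot: post a CQE with F_MORE and re-arm the timer */
		if (io_req_post_cqe(req, -ETIME, IORING_CQE_F_MORE)) {
			raw_spin_lock_irq(&ctx->timeout_lock);
			list_add(&timeout->list, ctx->timeout_list.prev);
			hrtimer_start(&data->timer,
				      timespec64_to_ktime(data->ts), data->mode);
			raw_spin_unlock_irq(&ctx->timeout_lock);
			return;
		}
	}
	io_req_task_complete(req, tw);
}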
98 struct io_kiocb *req;
102 req = cmd_to_io_kiocb(timeout);
104 req_set_fail(req);
105 io_req_queue_tw_complete(req, err);
111 static void io_kill_timeout(struct io_kiocb *req, struct list_head *list)
112 __must_hold(&req->ctx->timeout_lock)
114 struct io_timeout_data *io = req->async_data;
117 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
119 atomic_set(&req->ctx->cq_timeouts,
120 atomic_read(&req->ctx->cq_timeouts) + 1);
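io_kill_timeout() only moves the request onto the caller's kill list if the hrtimer can still be cancelled; a sketch consistent with the matched lines (the hrtimer_try_to_cancel() gate is an assumption from recent trees):

static void io_kill_timeout(struct io_kiocb *req, struct list_head *list)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = req->async_data;

	/* -1 means the timer callback is already running; leave it alone */
	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);

		atomic_set(&req->ctx->cq_timeouts,
			atomic_read(&req->ctx->cq_timeouts) + 1);
		list_move_tail(&timeout->list, list);
	}
}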
135 struct io_kiocb *req = cmd_to_io_kiocb(timeout);
138 if (io_is_timeout_noseq(req))
153 io_kill_timeout(req, &list);
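Matched lines 135-153 bracket the flush loop that retires sequence-based timeouts once enough CQEs have been posted. The loop breaks at the first no-sequence entry and otherwise uses a wraparound-safe comparison; a sketch of the per-entry check (the field names target_seq and cq_last_tm_flush follow recent trees, with seq being cached_cq_tail minus cq_timeouts):

	u32 events_needed, events_got;

	if (io_is_timeout_noseq(req))
		break;

	/* both subtractions wrap mod 2^32, so the comparison stays valid */
	events_needed = timeout->target_seq - ctx->cq_last_tm_flush;
	events_got = seq - ctx->cq_last_tm_flush;
	if (events_needed > events_got)
		break;			/* not enough completions yet */

	io_kill_timeout(req, &list);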
176 static void io_fail_links(struct io_kiocb *req)
177 __must_hold(&req->ctx->completion_lock)
179 struct io_kiocb *link = req->link;
180 bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;
190 trace_io_uring_fail_link(req, link);
194 link = req->link;
197 req->link = NULL;
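Between those anchors, io_fail_links() walks the chain hanging off req, failing each link with -ECANCELED and honouring REQ_F_SKIP_LINK_CQES. A simplified paraphrase, not the verbatim body (the exact unlink/advance order differs between versions):

static void io_fail_links(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = req->link;
	bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;

	while (link) {
		struct io_kiocb *nxt = link->link;

		if (ignore_cqes)
			link->flags |= REQ_F_CQE_SKIP;	/* suppress its CQE */
		trace_io_uring_fail_link(req, link);
		io_req_task_queue_fail(link, -ECANCELED);
		link = nxt;
	}
	req->link = NULL;	/* the whole chain is detached now */
}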
200 static inline void io_remove_next_linked(struct io_kiocb *req)
202 struct io_kiocb *nxt = req->link;
204 req->link = nxt->link;
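The helper is two statements plus clearing the detached node; reconstructed:

static inline void io_remove_next_linked(struct io_kiocb *req)
{
	struct io_kiocb *nxt = req->link;

	/* splice nxt out of the singly linked request chain */
	req->link = nxt->link;
	nxt->link = NULL;
}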
208 void io_disarm_next(struct io_kiocb *req)
209 __must_hold(&req->ctx->completion_lock)
213 if (req->flags & REQ_F_ARM_LTIMEOUT) {
214 link = req->link;
215 req->flags &= ~REQ_F_ARM_LTIMEOUT;
217 io_remove_next_linked(req);
220 } else if (req->flags & REQ_F_LINK_TIMEOUT) {
221 struct io_ring_ctx *ctx = req->ctx;
224 if (req->link && req->link->opcode == IORING_OP_LINK_TIMEOUT)
225 link = __io_disarm_linked_timeout(req, req->link);
231 if (unlikely((req->flags & REQ_F_FAIL) &&
232 !(req->flags & REQ_F_HARDLINK)))
233 io_fail_links(req);
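Putting the matches back together, io_disarm_next() handles the two arming states: a not-yet-armed linked timeout (REQ_F_ARM_LTIMEOUT) is simply unlinked and cancelled, while an armed one (REQ_F_LINK_TIMEOUT) must be disarmed under timeout_lock; failed soft links are then failed as a group. A sketch (the lock flavour and the io_req_queue_tw_complete() calls are assumptions from recent trees):

void io_disarm_next(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = NULL;

	if (req->flags & REQ_F_ARM_LTIMEOUT) {
		link = req->link;
		req->flags &= ~REQ_F_ARM_LTIMEOUT;
		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
			io_remove_next_linked(req);
			io_req_queue_tw_complete(link, -ECANCELED);
		}
	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;

		raw_spin_lock_irq(&ctx->timeout_lock);
		if (req->link && req->link->opcode == IORING_OP_LINK_TIMEOUT)
			link = __io_disarm_linked_timeout(req, req->link);
		raw_spin_unlock_irq(&ctx->timeout_lock);
		if (link)
			io_req_queue_tw_complete(link, -ECANCELED);
	}
	if (unlikely((req->flags & REQ_F_FAIL) &&
		     !(req->flags & REQ_F_HARDLINK)))
		io_fail_links(req);
}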
236 static struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
238 __must_hold(&req->ctx->completion_lock)
239 __must_hold(&req->ctx->timeout_lock)
244 io_remove_next_linked(req);
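The double __must_hold shows this helper needs both locks; it unlinks the timeout and only reports success if the hrtimer had not already fired. A sketch (body beyond the matched lines assumed from recent trees):

static struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
						   struct io_kiocb *link)
	__must_hold(&req->ctx->completion_lock)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = link->async_data;
	struct io_timeout *timeout = io_kiocb_to_cmd(link, struct io_timeout);

	io_remove_next_linked(req);
	timeout->head = NULL;
	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		list_del(&timeout->list);
		return link;	/* caller completes it with -ECANCELED */
	}
	return NULL;		/* timer already firing; it completes itself */
}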
258 struct io_kiocb *req = data->req;
259 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
260 struct io_ring_ctx *ctx = req->ctx;
265 atomic_set(&req->ctx->cq_timeouts,
266 atomic_read(&req->ctx->cq_timeouts) + 1);
270 req_set_fail(req);
272 io_req_set_res(req, -ETIME, 0);
273 req->io_task_work.func = io_timeout_complete;
274 io_req_task_work_add(req);
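These matches are the hrtimer callback: it runs in irq context, so it only records the expiry and punts completion to task work. A sketch (the lock flavour and the IORING_TIMEOUT_ETIME_SUCCESS check are assumptions from recent trees):

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	raw_spin_lock_irqsave(&ctx->timeout_lock, flags);
	list_del_init(&timeout->list);
	atomic_set(&req->ctx->cq_timeouts,
		atomic_read(&req->ctx->cq_timeouts) + 1);
	raw_spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
		req_set_fail(req);

	io_req_set_res(req, -ETIME, 0);
	req->io_task_work.func = io_timeout_complete;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}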
284 struct io_kiocb *req = NULL;
290 req = tmp;
294 if (!req)
297 io = req->async_data;
300 timeout = io_kiocb_to_cmd(req, struct io_timeout);
302 return req;
308 struct io_kiocb *req;
311 req = io_timeout_extract(ctx, cd);
314 if (IS_ERR(req))
315 return PTR_ERR(req);
316 io_req_task_queue_fail(req, -ECANCELED);
320 static void io_req_task_link_timeout(struct io_kiocb *req, io_tw_token_t tw)
322 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
327 if (!io_should_terminate_tw(req->ctx)) {
329 .ctx = req->ctx,
333 ret = io_try_cancel(req->tctx, &cd, 0);
337 io_req_set_res(req, ret ?: -ETIME, 0);
338 io_req_task_complete(req, tw);
341 io_req_set_res(req, -ETIME, 0);
342 io_req_task_complete(req, tw);
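The branch structure visible in the matches suggests: if the timed-out link still has a target (prev), try to cancel it and report the cancel result, otherwise just complete with -ETIME. A sketch (the prev handling around the matched lines is an assumption):

static void io_req_task_link_timeout(struct io_kiocb *req, io_tw_token_t tw)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_kiocb *prev = timeout->prev;
	int ret;

	if (prev) {
		if (!io_should_terminate_tw(req->ctx)) {
			struct io_cancel_data cd = {
				.ctx = req->ctx,
				.data = prev->cqe.user_data,
			};

			ret = io_try_cancel(req->tctx, &cd, 0);
		} else {
			ret = -ECANCELED;
		}
		io_req_set_res(req, ret ?: -ETIME, 0);
		io_req_task_complete(req, tw);
		io_put_req(prev);	/* drop the ref taken when arming */
	} else {
		io_req_set_res(req, -ETIME, 0);
		io_req_task_complete(req, tw);
	}
}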
350 struct io_kiocb *prev, *req = data->req;
351 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
352 struct io_ring_ctx *ctx = req->ctx;
372 req->io_task_work.func = io_req_task_link_timeout;
373 io_req_task_work_add(req);
399 struct io_kiocb *req = NULL;
405 req = tmp;
409 if (!req)
412 io = req->async_data;
425 struct io_kiocb *req = io_timeout_extract(ctx, &cd);
426 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
429 if (IS_ERR(req))
430 return PTR_ERR(req);
433 data = req->async_data;
442 int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
444 struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
446 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
482 int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
484 struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
485 struct io_ring_ctx *ctx = req->ctx;
506 req_set_fail(req);
507 io_req_set_res(req, ret, 0);
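For context, this prep/issue pair is what IORING_OP_TIMEOUT_REMOVE reaches; from userspace a timeout is cancelled by submitting a remove SQE keyed on the original user_data. A minimal liburing sketch (error handling elided, ring assumed initialised):

	struct __kernel_timespec ts = { .tv_sec = 30 };
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(&ring);		/* arm a 30s timeout */
	io_uring_prep_timeout(sqe, &ts, 0, 0);
	sqe->user_data = 0xcafe;
	io_uring_submit(&ring);

	sqe = io_uring_get_sqe(&ring);		/* later: cancel it by user_data */
	io_uring_prep_timeout_remove(sqe, 0xcafe, 0);
	io_uring_submit(&ring);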
511 static int __io_timeout_prep(struct io_kiocb *req,
515 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
538 if (unlikely(off && !req->ctx->off_timeout_used))
539 req->ctx->off_timeout_used = true;
548 if (WARN_ON_ONCE(req_has_async_data(req)))
550 data = io_uring_alloc_async_data(NULL, req);
553 data->req = req;
565 struct io_submit_link *link = &req->ctx->submit_state.link;
581 int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
583 return __io_timeout_prep(req, sqe, false);
586 int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
588 return __io_timeout_prep(req, sqe, true);
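io_timeout_prep() and io_link_timeout_prep() are thin wrappers over __io_timeout_prep() with the is_timeout_link flag toggled. From userspace the link variant is driven like this (liburing sketch; ring, fd and buf are assumed to exist):

	struct __kernel_timespec ts = { .tv_sec = 1 };
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(&ring);		/* the I/O being guarded */
	io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
	sqe->flags |= IOSQE_IO_LINK;		/* next SQE links to this one */

	sqe = io_uring_get_sqe(&ring);		/* its linked timeout */
	io_uring_prep_link_timeout(sqe, &ts, 0);

	io_uring_submit(&ring);	/* read completes -ECANCELED if 1s elapses */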
591 int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
593 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
594 struct io_ring_ctx *ctx = req->ctx;
595 struct io_timeout_data *data = req->async_data;
606 if (io_is_timeout_noseq(req)) {
641 void io_queue_linked_timeout(struct io_kiocb *req)
643 struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
644 struct io_ring_ctx *ctx = req->ctx;
652 struct io_timeout_data *data = req->async_data;
660 io_put_req(req);
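The arming side: the timer is only started if timeout->head still points at the linked request (it may already have completed), and the final io_put_req() drops the submission reference either way. A sketch (timer-callback wiring elided, since trees differ on where io_link_timeout_fn is installed):

void io_queue_linked_timeout(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;

	raw_spin_lock_irq(&ctx->timeout_lock);
	/* head == NULL: the linked request already finished, nothing to arm */
	if (timeout->head) {
		struct io_timeout_data *data = req->async_data;

		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
			      data->mode);
		list_add_tail(&timeout->list, &ctx->ltimeout_list);
	}
	raw_spin_unlock_irq(&ctx->timeout_lock);
	io_put_req(req);	/* drop the submission reference */
}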
667 struct io_kiocb *req;
674 io_for_each_link(req, head) {
675 if (req->flags & REQ_F_INFLIGHT)
695 struct io_kiocb *req = cmd_to_io_kiocb(timeout);
697 if (io_match_task(req, tctx, cancel_all))
698 io_kill_timeout(req, &list);
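Finally, the teardown path walks ctx->timeout_list under timeout_lock and kills every entry matching the task; a sketch consistent with the matches (the return convention via io_flush_killed_timeouts() is assumed from recent trees):

__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
			     bool cancel_all)
{
	struct io_timeout *timeout, *tmp;
	LIST_HEAD(list);

	raw_spin_lock_irq(&ctx->timeout_lock);
	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);

		if (io_match_task(req, tctx, cancel_all))
			io_kill_timeout(req, &list);
	}
	raw_spin_unlock_irq(&ctx->timeout_lock);

	/* complete the collected timeouts with -ECANCELED */
	return io_flush_killed_timeouts(&list, -ECANCELED);
}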