Lines matching full:req (io_uring poll handling)

36 	struct io_kiocb *req;
73 static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
82 v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
85 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
94 static inline bool io_poll_get_ownership(struct io_kiocb *req)
96 if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
97 return io_poll_get_ownership_slowpath(req);
98 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
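
The matches above (file lines 73–98) outline the poll_refs ownership scheme: the low bits of one atomic counter count outstanding wakeup references, the first caller to raise that count from zero owns the request's task work, and once the counter grows past a bias threshold further callers set a retry flag instead of inflating it. A minimal userspace C model of that discipline follows; the mask width, bias value, and flag bit are illustrative assumptions, not the kernel's definitions.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define POLL_REF_MASK   ((1u << 20) - 1)  /* low bits: wakeup reference count (assumed width) */
#define POLL_RETRY_FLAG (1u << 30)        /* "something changed, re-check" marker */
#define POLL_REF_BIAS   128u              /* assumed threshold for taking the slow path */

struct fake_req {
        atomic_uint poll_refs;
};

/* True if the caller took the first reference and now owns the task work. */
static bool poll_get_ownership(struct fake_req *req)
{
        if (atomic_load(&req->poll_refs) >= POLL_REF_BIAS) {
                /* Counter already crowded: record a retry instead of bumping it. */
                unsigned int v = atomic_fetch_or(&req->poll_refs, POLL_RETRY_FLAG);

                if (v & POLL_REF_MASK)
                        return false;
        }
        return !(atomic_fetch_add(&req->poll_refs, 1) & POLL_REF_MASK);
}

int main(void)
{
        struct fake_req req;

        atomic_init(&req.poll_refs, 0);
        printf("first caller owns:  %d\n", poll_get_ownership(&req));  /* 1 */
        printf("second caller owns: %d\n", poll_get_ownership(&req));  /* 0 */
        return 0;
}
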
101 static void io_poll_mark_cancelled(struct io_kiocb *req)
103 atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
106 static struct io_poll *io_poll_get_double(struct io_kiocb *req)
109 if (req->opcode == IORING_OP_POLL_ADD)
110 return req->async_data;
111 return req->apoll->double_poll;
114 static struct io_poll *io_poll_get_single(struct io_kiocb *req)
116 if (req->opcode == IORING_OP_POLL_ADD)
117 return io_kiocb_to_cmd(req, struct io_poll);
118 return &req->apoll->poll;
121 static void io_poll_req_insert(struct io_kiocb *req)
123 struct io_hash_table *table = &req->ctx->cancel_table;
124 u32 index = hash_long(req->cqe.user_data, table->hash_bits);
126 lockdep_assert_held(&req->ctx->uring_lock);
128 hlist_add_head(&req->hash_node, &table->hbs[index].list);
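
io_poll_req_insert() (file lines 121–128) files the request into a cancellation hash table, bucketed by the CQE user_data, so a later cancel or update by user_data only has to scan one bucket. Below is a self-contained sketch of that bucketing, including a lookup in the style of the io_poll_find() matches further down; the table size, hash constant, and singly linked chains are stand-ins for the kernel's hash_long() and hlist machinery.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define HASH_BITS  4
#define NR_BUCKETS (1u << HASH_BITS)

struct fake_req {
        uint64_t user_data;
        struct fake_req *next;          /* chain within one bucket */
};

static struct fake_req *buckets[NR_BUCKETS];

/* Fibonacci-style fold of the 64-bit user_data down to HASH_BITS bits. */
static unsigned int hash_user_data(uint64_t user_data)
{
        return (unsigned int)((user_data * 0x9e3779b97f4a7c15ULL) >> (64 - HASH_BITS));
}

static void poll_req_insert(struct fake_req *req)
{
        unsigned int idx = hash_user_data(req->user_data);

        req->next = buckets[idx];
        buckets[idx] = req;
}

/* Cancel-side counterpart: walk only the bucket user_data hashes to. */
static struct fake_req *poll_req_find(uint64_t user_data)
{
        struct fake_req *req;

        for (req = buckets[hash_user_data(user_data)]; req; req = req->next)
                if (req->user_data == user_data)
                        return req;
        return NULL;
}

int main(void)
{
        struct fake_req a = { .user_data = 0x1234 };
        struct fake_req b = { .user_data = 0xdead };

        poll_req_insert(&a);
        poll_req_insert(&b);
        printf("found 0xdead: %s\n", poll_req_find(0xdead) == &b ? "yes" : "no");
        return 0;
}
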
153 static void io_poll_remove_entries(struct io_kiocb *req)
159 if (!(req->flags & (REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL)))
178 if (req->flags & REQ_F_SINGLE_POLL)
179 io_poll_remove_entry(io_poll_get_single(req));
180 if (req->flags & REQ_F_DOUBLE_POLL)
181 io_poll_remove_entry(io_poll_get_double(req));
193 static void __io_poll_execute(struct io_kiocb *req, int mask)
197 io_req_set_res(req, mask, 0);
198 req->io_task_work.func = io_poll_task_func;
200 trace_io_uring_task_add(req, mask);
202 if (!(req->flags & REQ_F_POLL_NO_LAZY))
204 __io_req_task_work_add(req, flags);
207 static inline void io_poll_execute(struct io_kiocb *req, int res)
209 if (io_poll_get_ownership(req))
210 __io_poll_execute(req, res);
220 * req->cqe.res. IOU_POLL_REMOVE_POLL_USE_RES indicates to remove multishot
221 * poll and that the result is stored in req->cqe.
223 static int io_poll_check_events(struct io_kiocb *req, io_tw_token_t tw)
227 if (unlikely(io_should_terminate_tw(req->ctx)))
231 v = atomic_read(&req->poll_refs);
245 req->cqe.res = 0;
248 req->cqe.res = 0;
254 atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs);
260 if (!req->cqe.res) {
261 struct poll_table_struct pt = { ._key = req->apoll_events };
262 req->cqe.res = vfs_poll(req->file, &pt) & req->apoll_events;
269 if (unlikely(!req->cqe.res)) {
271 if (!(req->apoll_events & EPOLLONESHOT))
276 if (req->apoll_events & EPOLLONESHOT)
280 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
281 __poll_t mask = mangle_poll(req->cqe.res &
282 req->apoll_events);
284 if (!io_req_post_cqe(req, mask, IORING_CQE_F_MORE)) {
285 io_req_set_res(req, mask, 0);
289 int ret = io_poll_issue(req, tw);
300 req->cqe.res = 0;
307 } while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK);
309 io_napi_add(req);
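
io_poll_check_events() ends (file line 307) by subtracting the references it saw at the start of the pass and looping again whenever the masked remainder is non-zero, i.e. whenever another wakeup raced in while events were being handled. A compact userspace model of that drain loop, with invented names and an assumed mask width:

#include <stdatomic.h>
#include <stdio.h>

#define REF_MASK ((1u << 20) - 1)       /* assumed width of the reference field */

static atomic_uint poll_refs;

static void drain_events(void)
{
        unsigned int v;

        do {
                v = atomic_load(&poll_refs) & REF_MASK;
                /* ... handle the events those v wakeups announced ... */

                /*
                 * Drop exactly the references seen at the top of this pass.
                 * A non-zero remainder means a new wakeup arrived meanwhile,
                 * so take another pass instead of giving up ownership.
                 */
        } while ((atomic_fetch_sub(&poll_refs, v) - v) & REF_MASK);
}

int main(void)
{
        atomic_fetch_add(&poll_refs, 1);        /* the wakeup that granted ownership */
        drain_events();
        printf("refs after drain: %u\n", atomic_load(&poll_refs));      /* 0 */
        return 0;
}
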
313 void io_poll_task_func(struct io_kiocb *req, io_tw_token_t tw)
317 ret = io_poll_check_events(req, tw);
321 __io_poll_execute(req, 0);
324 io_poll_remove_entries(req);
326 hash_del(&req->hash_node);
328 if (req->opcode == IORING_OP_POLL_ADD) {
332 poll = io_kiocb_to_cmd(req, struct io_poll);
333 req->cqe.res = mangle_poll(req->cqe.res & poll->events);
335 io_req_task_submit(req, tw);
338 req->cqe.res = ret;
339 req_set_fail(req);
342 io_req_set_res(req, req->cqe.res, 0);
343 io_req_task_complete(req, tw);
345 io_tw_lock(req->ctx, tw);
348 io_req_task_complete(req, tw);
350 io_req_task_submit(req, tw);
352 io_req_defer_failed(req, ret);
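
io_poll_task_func() (file lines 313–352) acts on the code returned by io_poll_check_events(); the comment fragment quoted earlier (file lines 220–221) documents one of those codes, IOU_POLL_REMOVE_POLL_USE_RES, meaning "drop the multishot poll, the result is already stored in req->cqe". The dispatch below is only a hedged illustration of that contract: apart from IOU_POLL_REMOVE_POLL_USE_RES, the code names and the completion steps are assumptions rather than the kernel's enum.

#include <stdio.h>

enum poll_tw_result {
        POLL_KEEP_ARMED,                /* assumed: nothing to report yet */
        POLL_COMPLETE,                  /* assumed: post the result and finish */
        IOU_POLL_REMOVE_POLL_USE_RES,   /* remove multishot poll; result already in the CQE */
};

struct fake_req {
        int cqe_res;                    /* result destined for the CQE */
};

static void poll_task_work(struct fake_req *req, enum poll_tw_result ret)
{
        if (ret == POLL_KEEP_ARMED)
                return;                 /* stay on the wait queue, wait for more events */

        /* The remaining cases tear the poll down and complete with cqe_res. */
        printf("completing with res=%d\n", req->cqe_res);
}

int main(void)
{
        struct fake_req req = { .cqe_res = 1 };

        poll_task_work(&req, IOU_POLL_REMOVE_POLL_USE_RES);
        return 0;
}
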
356 static void io_poll_cancel_req(struct io_kiocb *req)
358 io_poll_mark_cancelled(req);
360 io_poll_execute(req, 0);
365 static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
367 io_poll_mark_cancelled(req);
369 io_poll_execute(req, 0);
382 * as req->head is NULL'ed out, the request can be
393 struct io_kiocb *req = wqe_to_req(wait);
398 return io_pollfree_wake(req, poll);
404 if (io_poll_get_ownership(req)) {
418 req->flags &= ~REQ_F_DOUBLE_POLL;
420 req->flags &= ~REQ_F_SINGLE_POLL;
422 __io_poll_execute(req, mask);
428 static bool io_poll_double_prepare(struct io_kiocb *req)
431 struct io_poll *poll = io_poll_get_single(req);
437 * poll arm might not hold ownership and so race for req->flags with
444 req->flags |= REQ_F_DOUBLE_POLL;
445 if (req->opcode == IORING_OP_POLL_ADD)
446 req->flags |= REQ_F_ASYNC_DATA;
457 struct io_kiocb *req = pt->req;
458 unsigned long wqe_private = (unsigned long) req;
488 if (!io_poll_double_prepare(req)) {
496 req->flags |= REQ_F_SINGLE_POLL;
514 struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);
517 (struct io_poll **) &pt->req->async_data);
520 static bool io_poll_can_finish_inline(struct io_kiocb *req,
523 return pt->owning || io_poll_get_ownership(req);
526 static void io_poll_add_hash(struct io_kiocb *req, unsigned int issue_flags)
528 struct io_ring_ctx *ctx = req->ctx;
531 io_poll_req_insert(req);
541 static int __io_arm_poll_handler(struct io_kiocb *req,
546 INIT_HLIST_NODE(&req->hash_node);
548 poll->file = req->file;
549 req->apoll_events = poll->events;
552 ipt->req = req;
567 atomic_set(&req->poll_refs, (int)ipt->owning);
577 req->flags |= REQ_F_POLL_NO_LAZY;
579 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
582 io_poll_remove_entries(req);
584 if (!io_poll_can_finish_inline(req, ipt)) {
585 io_poll_mark_cancelled(req);
596 if (!io_poll_can_finish_inline(req, ipt)) {
597 io_poll_add_hash(req, issue_flags);
600 io_poll_remove_entries(req);
602 /* no one else has access to the req, forget about the ref */
606 io_poll_add_hash(req, issue_flags);
609 io_poll_can_finish_inline(req, ipt)) {
610 __io_poll_execute(req, mask);
613 io_napi_add(req);
620 if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1)
621 __io_poll_execute(req, 0);
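
__io_arm_poll_handler() (file lines 541–621) arms the waitqueue entries while holding one reference of its own; at the end it tries to drop that reference with a compare-and-swap from 1 to 0, and if the swap fails a wakeup has already taken a reference, so the remaining work is punted to task work instead (file lines 620–621). A userspace sketch of that handoff, with invented helper names:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint poll_refs;

static void schedule_task_work(void)
{
        printf("wakeup raced in, punting to task work\n");
}

static void finish_arming(void)
{
        unsigned int expected = 1;

        /* Drop our arming reference only if it is still the sole one. */
        if (!atomic_compare_exchange_strong(&poll_refs, &expected, 0))
                schedule_task_work();
}

int main(void)
{
        atomic_store(&poll_refs, 1);    /* reference held while arming */
        finish_arming();                /* quiet path: no wakeup raced */

        atomic_store(&poll_refs, 2);    /* a wakeup grabbed a reference meanwhile */
        finish_arming();                /* falls back to task work */
        return 0;
}
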
630 struct async_poll *apoll = pt->req->apoll;
643 static struct async_poll *io_req_alloc_apoll(struct io_kiocb *req,
646 struct io_ring_ctx *ctx = req->ctx;
649 if (req->flags & REQ_F_POLLED) {
650 apoll = req->apoll;
662 req->apoll = apoll;
668 int io_arm_apoll(struct io_kiocb *req, unsigned issue_flags, __poll_t mask)
675 if (!io_file_can_poll(req))
677 if (!(req->flags & REQ_F_APOLL_MULTISHOT))
680 apoll = io_req_alloc_apoll(req, issue_flags);
683 req->flags &= ~(REQ_F_SINGLE_POLL | REQ_F_DOUBLE_POLL);
684 req->flags |= REQ_F_POLLED;
687 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
690 trace_io_uring_poll_arm(req, mask, apoll->poll.events);
694 int io_arm_poll_handler(struct io_kiocb *req, unsigned issue_flags)
696 const struct io_issue_def *def = &io_issue_defs[req->opcode];
701 if (!io_file_can_poll(req))
708 if (req->flags & REQ_F_CLEAR_POLLIN)
716 return io_arm_apoll(req, issue_flags, mask);
727 struct io_kiocb *req;
736 hlist_for_each_entry_safe(req, tmp, &hb->list, hash_node) {
737 if (io_match_task_safe(req, tctx, cancel_all)) {
738 hlist_del_init(&req->hash_node);
739 io_poll_cancel_req(req);
750 struct io_kiocb *req;
754 hlist_for_each_entry(req, &hb->list, hash_node) {
755 if (cd->data != req->cqe.user_data)
757 if (poll_only && req->opcode != IORING_OP_POLL_ADD)
760 if (io_cancel_match_sequence(req, cd->seq))
763 return req;
772 struct io_kiocb *req;
778 hlist_for_each_entry(req, &hb->list, hash_node) {
779 if (io_cancel_req_match(req, cd))
780 return req;
786 static int io_poll_disarm(struct io_kiocb *req)
788 if (!req)
790 if (!io_poll_get_ownership(req))
792 io_poll_remove_entries(req);
793 hash_del(&req->hash_node);
799 struct io_kiocb *req;
803 req = io_poll_file_find(ctx, cd);
805 req = io_poll_find(ctx, false, cd);
807 if (req) {
808 io_poll_cancel_req(req);
842 int io_poll_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
844 struct io_poll_update *upd = io_kiocb_to_cmd(req, struct io_poll_update);
872 int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
874 struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
882 if ((flags & IORING_POLL_ADD_MULTI) && (req->flags & REQ_F_CQE_SKIP))
889 int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
891 struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
897 ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
899 io_req_set_res(req, ipt.result_mask, 0);
905 int io_poll_remove(struct io_kiocb *req, unsigned int issue_flags)
907 struct io_poll_update *poll_update = io_kiocb_to_cmd(req, struct io_poll_update);
908 struct io_ring_ctx *ctx = req->ctx;
950 req_set_fail(req);
954 io_req_set_res(req, ret, 0);