Lines Matching defs:issue_flags
26 static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
31 if (issue_flags & IO_URING_F_UNLOCKED)
78 unsigned int issue_flags)
87 io_ring_submit_lock(ctx, issue_flags);
89 io_ring_submit_unlock(ctx, issue_flags);
95 * issue_flags of IO_URING_F_CANCEL.
102 unsigned int issue_flags)
109 io_ring_submit_lock(ctx, issue_flags);
111 io_ring_submit_unlock(ctx, issue_flags);
155 unsigned issue_flags, bool is_cqe32)
162 io_uring_cmd_del_cancelable(ioucmd, issue_flags);
173 io_req_uring_cleanup(req, issue_flags);
177 } else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
178 if (WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED))
231 int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
246 issue_flags |= IO_URING_F_SQE128;
248 issue_flags |= IO_URING_F_CQE32;
250 issue_flags |= IO_URING_F_COMPAT;
254 issue_flags |= IO_URING_F_IOPOLL;
263 ret = file->f_op->uring_cmd(ioucmd, issue_flags);
276 io_req_uring_cleanup(req, issue_flags);
284 unsigned int issue_flags)
291 return io_import_reg_buf(req, iter, ubuf, len, rw, issue_flags);
299 unsigned issue_flags)
313 issue_flags);
325 unsigned int issue_flags, __poll_t mask)
336 ret = io_arm_apoll(req, issue_flags, mask);
341 unsigned int issue_flags,
346 if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_MULTISHOT)))
357 unsigned int issue_flags)
367 return io_buffer_select(req, len, buf_group, issue_flags);
380 struct io_br_sel *sel, unsigned int issue_flags)
394 io_kbuf_recycle(req, sel->buf_list, issue_flags);
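Read together, the matches trace issue_flags through the kernel's io_uring/uring_cmd.c: io_uring_cmd() ORs ring state into the flags (IO_URING_F_SQE128, IO_URING_F_CQE32, IO_URING_F_COMPAT, IO_URING_F_IOPOLL, lines 246-254) before dispatching to file->f_op->uring_cmd() at line 263; io_ring_submit_lock()/io_ring_submit_unlock() (lines 87-111) consult IO_URING_F_UNLOCKED to take the ring mutex only when the submitter does not already hold it; and the completion path branches on IO_URING_F_COMPLETE_DEFER (line 177) while asserting the lock is held (line 178). Below is a minimal sketch, not taken from the file above, of how a driver-side handler might consume these flags: mydrv_uring_cmd(), MYDRV_CMD_ECHO, and struct mydrv_cmd are invented for illustration, while the flag bits, ioucmd->cmd_op, and io_uring_sqe_cmd() are real in-kernel API.

/*
 * Hypothetical ->uring_cmd() handler; a sketch, assuming a fixed-width
 * command payload that needs the big-SQE layout.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/io_uring/cmd.h>

#define MYDRV_CMD_ECHO  0x01            /* hypothetical command opcode */

struct mydrv_cmd {                      /* hypothetical 80-byte SQE payload */
        __u64 addr;
        __u64 len;
        __u64 __pad[8];
};

static int mydrv_uring_cmd(struct io_uring_cmd *ioucmd,
                           unsigned int issue_flags)
{
        const struct mydrv_cmd *arg;

        /*
         * Only delivered to commands that registered themselves via
         * io_uring_cmd_mark_cancelable(); shown for completeness (the
         * comment matched at line 95 documents this contract).
         */
        if (issue_flags & IO_URING_F_CANCEL)
                return 0;

        /* The 80-byte payload above only fits in the big-SQE layout. */
        if (!(issue_flags & IO_URING_F_SQE128))
                return -EOPNOTSUPP;

        /* This sketch does not implement ->iopoll completion. */
        if (issue_flags & IO_URING_F_IOPOLL)
                return -EOPNOTSUPP;

        switch (ioucmd->cmd_op) {
        case MYDRV_CMD_ECHO:
                arg = io_uring_sqe_cmd(ioucmd->sqe);
                /*
                 * A handler parsing user-supplied structs would branch on
                 * IO_URING_F_COMPAT here for 32-bit layouts; this fixed-width
                 * payload needs no translation.
                 */
                if (!arg->len)
                        return -EINVAL;
                /*
                 * Synchronous completion: any return other than -EAGAIN or
                 * -EIOCBQUEUED is posted as cqe->res by io_uring_cmd()
                 * (matched lines 263 and 276 above).
                 */
                return 0;
        default:
                return -EINVAL;
        }
}

The reason issue_flags is threaded through every helper in the list is that the same command can be issued inline from the submitting task (ring mutex held), from an io-wq worker (IO_URING_F_UNLOCKED), or during cancelation with IO_URING_F_CANCEL, per the comment matched at line 95; the lock and completion helpers rely on the flags to pick the variant that is safe in the current context.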