
Searched refs:issue_flags (Results 1 – 25 of 40) sorted by relevance

/linux/io_uring/
uring_cmd.c
19 static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags) in io_req_uring_cleanup() argument
29 if (issue_flags & IO_URING_F_UNLOCKED) in io_req_uring_cleanup()
67 unsigned int issue_flags) in io_uring_cmd_del_cancelable() argument
76 io_ring_submit_lock(ctx, issue_flags); in io_uring_cmd_del_cancelable()
78 io_ring_submit_unlock(ctx, issue_flags); in io_uring_cmd_del_cancelable()
91 unsigned int issue_flags) in io_uring_cmd_mark_cancelable() argument
98 io_ring_submit_lock(ctx, issue_flags); in io_uring_cmd_mark_cancelable()
100 io_ring_submit_unlock(ctx, issue_flags); in io_uring_cmd_mark_cancelable()
141 unsigned issue_flags) in io_uring_cmd_done() argument
145 io_uring_cmd_del_cancelable(ioucmd, issue_flags); in io_uring_cmd_done()
[all …]
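
Note on the pattern above (it recurs in net.c and rw.c below): cleanup helpers take issue_flags and branch on IO_URING_F_UNLOCKED, reusing a per-ctx cache only when the ring lock is already held. A compile-standalone sketch of that branch, with stub types and an illustrative flag value rather than the kernel's real definitions:

    #include <stdlib.h>
    #include <stdio.h>

    #define IO_URING_F_UNLOCKED (1U << 1)  /* stub value, illustrative only */

    struct async_data { int reusable; };   /* stand-in for per-request state */

    /* Shape of io_req_uring_cleanup(): with the ring lock held we may park
     * the async data in a per-ctx cache; from an unlocked context (e.g. an
     * io-wq worker) we must free it instead. */
    static void req_cleanup(struct async_data *d, unsigned int issue_flags)
    {
            if (issue_flags & IO_URING_F_UNLOCKED) {
                    free(d);        /* no lock: cannot touch the ctx cache */
                    return;
            }
            d->reusable = 1;        /* lock held: cache for reuse */
    }

    int main(void)
    {
            struct async_data *d = calloc(1, sizeof(*d));
            req_cleanup(d, 0);      /* lock assumed held: cached */
            printf("reusable=%d\n", d->reusable);
            free(d);
            return 0;
    }
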
openclose.c
114 int io_openat2(struct io_kiocb *req, unsigned int issue_flags) in io_openat2() argument
128 if (issue_flags & IO_URING_F_NONBLOCK) { in io_openat2()
153 (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK))) in io_openat2()
158 if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set) in io_openat2()
164 ret = io_fixed_fd_install(req, issue_flags, file, in io_openat2()
175 int io_openat(struct io_kiocb *req, unsigned int issue_flags) in io_openat() argument
177 return io_openat2(req, issue_flags); in io_openat()
188 int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags, in __io_close_fixed() argument
193 io_ring_submit_lock(ctx, issue_flags); in __io_close_fixed()
195 io_ring_submit_unlock(ctx, issue_flags); in __io_close_fixed()
[all …]
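
Note: io_openat2() checks IO_URING_F_NONBLOCK and bails out with -EAGAIN when the open could sleep, letting io_uring retry the request from a worker thread. A hedged sketch of that control flow, with a stub flag value and a hypothetical would_block() helper standing in for the real resolve/nonblock checks:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define IO_URING_F_NONBLOCK (1U << 0)  /* stub value, illustrative only */

    /* Hypothetical: true when the operation cannot finish without sleeping. */
    static bool would_block(void) { return true; }

    /* Shape of io_openat2(): on a nonblocking issue, punt to the async
     * worker by returning -EAGAIN instead of sleeping inline. */
    static int do_open(unsigned int issue_flags)
    {
            if ((issue_flags & IO_URING_F_NONBLOCK) && would_block())
                    return -EAGAIN; /* io_uring re-issues from io-wq */
            return 0;               /* completed inline */
    }

    int main(void)
    {
            printf("%d\n", do_open(IO_URING_F_NONBLOCK)); /* -EAGAIN */
            printf("%d\n", do_open(0));                   /* 0 */
            return 0;
    }
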
net.c
104 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags) in io_shutdown() argument
110 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_shutdown()
137 static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags) in io_netmsg_recycle() argument
142 if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) { in io_netmsg_recycle()
442 unsigned int issue_flags) in io_req_msg_cleanup() argument
445 io_netmsg_recycle(req, issue_flags); in io_req_msg_cleanup()
489 unsigned issue_flags) in io_send_finish() argument
496 cflags = io_put_kbuf(req, *ret, issue_flags); in io_send_finish()
500 cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), issue_flags); in io_send_finish()
521 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) in io_sendmsg() argument
[all …]
openclose.h
3 int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
7 int io_openat(struct io_kiocb *req, unsigned int issue_flags);
11 int io_openat2(struct io_kiocb *req, unsigned int issue_flags);
14 int io_close(struct io_kiocb *req, unsigned int issue_flags);
17 int io_install_fixed_fd(struct io_kiocb *req, unsigned int issue_flags);
fs.h
4 int io_renameat(struct io_kiocb *req, unsigned int issue_flags);
8 int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags);
12 int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags);
16 int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags);
19 int io_linkat(struct io_kiocb *req, unsigned int issue_flags);
futex.c
117 unsigned int issue_flags) in io_futex_cancel() argument
126 io_ring_submit_lock(ctx, issue_flags); in io_futex_cancel()
136 io_ring_submit_unlock(ctx, issue_flags); in io_futex_cancel()
254 int io_futexv_wait(struct io_kiocb *req, unsigned int issue_flags) in io_futexv_wait() argument
261 io_ring_submit_lock(ctx, issue_flags); in io_futexv_wait()
269 io_ring_submit_unlock(ctx, issue_flags); in io_futexv_wait()
305 io_ring_submit_unlock(ctx, issue_flags); in io_futexv_wait()
309 int io_futex_wait(struct io_kiocb *req, unsigned int issue_flags) in io_futex_wait() argument
322 io_ring_submit_lock(ctx, issue_flags); in io_futex_wait()
339 io_ring_submit_unlock(ctx, issue_flags); in io_futex_wait()
[all …]
rw.c
94 unsigned int issue_flags) in __io_import_iovec() argument
108 buf = io_buffer_select(req, &sqe_len, issue_flags); in __io_import_iovec()
140 unsigned int issue_flags) in io_import_iovec() argument
144 ret = __io_import_iovec(rw, req, io, issue_flags); in io_import_iovec()
152 static void io_rw_recycle(struct io_kiocb *req, unsigned int issue_flags) in io_rw_recycle() argument
156 if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) in io_rw_recycle()
166 static void io_req_rw_cleanup(struct io_kiocb *req, unsigned int issue_flags) in io_req_rw_cleanup() argument
197 io_rw_recycle(req, issue_flags); in io_req_rw_cleanup()
605 unsigned int issue_flags) in kiocb_done() argument
619 io_req_set_res(req, final_ret, io_put_kbuf(req, ret, issue_flags)); in kiocb_done()
[all …]
xattr.h
6 int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags);
9 int io_setxattr(struct io_kiocb *req, unsigned int issue_flags);
12 int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags);
15 int io_getxattr(struct io_kiocb *req, unsigned int issue_flags);
sync.c
40 int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags) in io_sync_file_range() argument
46 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_sync_file_range()
70 int io_fsync(struct io_kiocb *req, unsigned int issue_flags) in io_fsync() argument
77 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_fsync()
99 int io_fallocate(struct io_kiocb *req, unsigned int issue_flags) in io_fallocate() argument
105 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_fallocate()
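
Note: sync.c (like fs.c, advise.c, and truncate.c below) guards handlers that are always prepped as force-async: by the time they run, IO_URING_F_NONBLOCK must be clear, and the kernel documents that invariant with WARN_ON_ONCE. A sketch of the same guard with a plain assert standing in:

    #include <assert.h>
    #include <stdio.h>

    #define IO_URING_F_NONBLOCK (1U << 0)  /* stub value, illustrative only */

    /* Shape of io_fsync()/io_fallocate(): these opcodes are forced async at
     * prep time, so the handler should never see a nonblocking issue. */
    static int do_fsync(unsigned int issue_flags)
    {
            assert(!(issue_flags & IO_URING_F_NONBLOCK));
            /* ... the blocking vfs call would go here ... */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", do_fsync(0));
            return 0;
    }
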
kbuf.c
55 bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags) in io_kbuf_recycle_legacy() argument
61 io_ring_submit_lock(ctx, issue_flags); in io_kbuf_recycle_legacy()
69 io_ring_submit_unlock(ctx, issue_flags); in io_kbuf_recycle_legacy()
73 void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags) in __io_put_kbuf() argument
87 if (issue_flags & IO_URING_F_UNLOCKED) { in __io_put_kbuf()
137 unsigned int issue_flags) in io_ring_buffer_select() argument
159 if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) { in io_ring_buffer_select()
177 unsigned int issue_flags) in io_buffer_select() argument
183 io_ring_submit_lock(req->ctx, issue_flags); in io_buffer_select()
188 ret = io_ring_buffer_select(req, len, bl, issue_flags); in io_buffer_select()
[all …]
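
Note: line 159 of kbuf.c shows a subtler use of issue_flags: a ring-provided buffer may stay selected-but-uncommitted across a poll-driven retry only if the issue holds the ring lock and the file supports polling; otherwise the buffer-ring head must be committed immediately. A boolean sketch of that decision (stub flag value; file_can_poll stands in for io_file_can_poll()):

    #include <stdbool.h>
    #include <stdio.h>

    #define IO_URING_F_UNLOCKED (1U << 1)  /* stub value, illustrative only */

    /* Shape of the check in io_ring_buffer_select(): commit the buffer-ring
     * head right away unless the commit can safely wait for completion. */
    static bool must_commit_now(unsigned issue_flags, bool file_can_poll)
    {
            return (issue_flags & IO_URING_F_UNLOCKED) || !file_can_poll;
    }

    int main(void)
    {
            printf("%d\n", must_commit_now(IO_URING_F_UNLOCKED, true)); /* 1 */
            printf("%d\n", must_commit_now(0, true));                   /* 0 */
            return 0;
    }
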
fs.c
81 int io_renameat(struct io_kiocb *req, unsigned int issue_flags) in io_renameat() argument
86 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_renameat()
130 int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags) in io_unlinkat() argument
135 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_unlinkat()
177 int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags) in io_mkdirat() argument
182 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_mkdirat()
227 int io_symlinkat(struct io_kiocb *req, unsigned int issue_flags) in io_symlinkat() argument
232 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_symlinkat()
272 int io_linkat(struct io_kiocb *req, unsigned int issue_flags) in io_linkat() argument
277 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_linkat()
sync.h
4 int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags);
7 int io_fsync(struct io_kiocb *req, unsigned int issue_flags);
9 int io_fallocate(struct io_kiocb *req, unsigned int issue_flags);
nop.c
49 int io_nop(struct io_kiocb *req, unsigned int issue_flags) in io_nop() argument
56 req->file = io_file_get_fixed(req, nop->fd, issue_flags); in io_nop()
71 io_ring_submit_lock(ctx, issue_flags); in io_nop()
77 io_ring_submit_unlock(ctx, issue_flags); in io_nop()
waitid.c
159 unsigned int issue_flags) in io_waitid_cancel() argument
168 io_ring_submit_lock(ctx, issue_flags); in io_waitid_cancel()
178 io_ring_submit_unlock(ctx, issue_flags); in io_waitid_cancel()
305 int io_waitid(struct io_kiocb *req, unsigned int issue_flags) in io_waitid() argument
330 io_ring_submit_lock(ctx, issue_flags); in io_waitid()
345 io_ring_submit_unlock(ctx, issue_flags); in io_waitid()
353 io_ring_submit_unlock(ctx, issue_flags); in io_waitid()
361 io_ring_submit_unlock(ctx, issue_flags); in io_waitid()
advise.c
51 int io_madvise(struct io_kiocb *req, unsigned int issue_flags) in io_madvise() argument
57 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_madvise()
96 int io_fadvise(struct io_kiocb *req, unsigned int issue_flags) in io_fadvise() argument
101 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK && io_fadvise_force_async(fa)); in io_fadvise()
advise.h
4 int io_madvise(struct io_kiocb *req, unsigned int issue_flags);
7 int io_fadvise(struct io_kiocb *req, unsigned int issue_flags);
truncate.c
37 int io_ftruncate(struct io_kiocb *req, unsigned int issue_flags) in io_ftruncate() argument
42 WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK); in io_ftruncate()
epoll.c
46 int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags) in io_epoll_ctl() argument
50 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK; in io_epoll_ctl()
io_uring.h
86 unsigned issue_flags);
256 unsigned issue_flags) in io_ring_submit_unlock() argument
259 if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) in io_ring_submit_unlock()
264 unsigned issue_flags) in io_ring_submit_lock() argument
272 if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) in io_ring_submit_lock()
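
Note: io_uring.h is where the convention bottoms out. io_ring_submit_lock()/io_ring_submit_unlock() take the ctx mutex only when IO_URING_F_UNLOCKED says the caller does not already hold it; every lock/unlock pair in futex.c, kbuf.c, waitid.c, and nop.c above funnels through them. A pthread-based stand-in for the pair (stub flag value; the kernel uses ctx->uring_lock plus lockdep assertions):

    #include <pthread.h>
    #include <stdio.h>

    #define IO_URING_F_UNLOCKED (1U << 1)  /* stub value, illustrative only */

    struct ring_ctx { pthread_mutex_t uring_lock; };

    /* Shape of io_ring_submit_lock(): inline issue already holds the lock,
     * so only an unlocked caller (e.g. io-wq) takes it here. */
    static void ring_submit_lock(struct ring_ctx *ctx, unsigned issue_flags)
    {
            if (issue_flags & IO_URING_F_UNLOCKED)
                    pthread_mutex_lock(&ctx->uring_lock);
    }

    static void ring_submit_unlock(struct ring_ctx *ctx, unsigned issue_flags)
    {
            if (issue_flags & IO_URING_F_UNLOCKED)
                    pthread_mutex_unlock(&ctx->uring_lock);
    }

    int main(void)
    {
            struct ring_ctx ctx = { PTHREAD_MUTEX_INITIALIZER };
            ring_submit_lock(&ctx, IO_URING_F_UNLOCKED);
            puts("critical section");
            ring_submit_unlock(&ctx, IO_URING_F_UNLOCKED);
            return 0;
    }
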
nop.h
4 int io_nop(struct io_kiocb *req, unsigned int issue_flags);
truncate.h
4 int io_ftruncate(struct io_kiocb *req, unsigned int issue_flags);
/linux/drivers/nvme/host/
ioctl.c
419 unsigned issue_flags) in nvme_uring_task_cb() argument
425 io_uring_cmd_done(ioucmd, pdu->status, pdu->result, issue_flags); in nvme_uring_task_cb()
463 struct io_uring_cmd *ioucmd, unsigned int issue_flags, bool vec) in nvme_uring_cmd_io() argument
505 if (issue_flags & IO_URING_F_NONBLOCK) { in nvme_uring_cmd_io()
509 if (issue_flags & IO_URING_F_IOPOLL) in nvme_uring_cmd_io()
632 static int nvme_uring_cmd_checks(unsigned int issue_flags) in nvme_uring_cmd_checks() argument
636 if ((issue_flags & (IO_URING_F_SQE128|IO_URING_F_CQE32)) != in nvme_uring_cmd_checks()
643 unsigned int issue_flags) in nvme_ns_uring_cmd() argument
648 ret = nvme_uring_cmd_checks(issue_flags); in nvme_ns_uring_cmd()
654 ret = nvme_uring_cmd_io(ctrl, ns, ioucmd, issue_flags, false); in nvme_ns_uring_cmd()
[all …]
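
Note: nvme_uring_cmd_checks() reads the ring-state bits carried in issue_flags: NVMe passthrough commands only fit in 128-byte SQEs and 32-byte CQEs, so rings created without both features are rejected. A sketch of that validation with stub flag values:

    #include <errno.h>
    #include <stdio.h>

    /* Stub values, illustrative only; the real bits live in io_uring.h. */
    #define IO_URING_F_SQE128 (1U << 8)
    #define IO_URING_F_CQE32  (1U << 9)

    /* Shape of nvme_uring_cmd_checks(): require both big-SQE and big-CQE
     * ring formats before accepting a passthrough command. */
    static int uring_cmd_checks(unsigned int issue_flags)
    {
            if ((issue_flags & (IO_URING_F_SQE128 | IO_URING_F_CQE32)) !=
                (IO_URING_F_SQE128 | IO_URING_F_CQE32))
                    return -EOPNOTSUPP;
            return 0;
    }

    int main(void)
    {
            printf("%d\n", uring_cmd_checks(IO_URING_F_SQE128)); /* -EOPNOTSUPP */
            printf("%d\n", uring_cmd_checks(IO_URING_F_SQE128 | IO_URING_F_CQE32));
            return 0;
    }
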
/linux/include/linux/io_uring/
cmd.h
52 unsigned issue_flags);
63 unsigned int issue_flags);
75 u64 ret2, unsigned issue_flags) in io_uring_cmd_done() argument
84 unsigned int issue_flags) in io_uring_cmd_mark_cancelable() argument
net.h
8 int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
12 unsigned int issue_flags) in io_uring_cmd_sock() argument
/linux/drivers/block/
ublk_drv.c
1117 unsigned issue_flags) in ubq_complete_io_cmd() argument
1129 io_uring_cmd_done(io->cmd, res, 0, issue_flags); in ubq_complete_io_cmd()
1145 unsigned issue_flags) in __ublk_rq_task_work() argument
1181 ubq_complete_io_cmd(io, UBLK_IO_RES_NEED_GET_DATA, issue_flags); in __ublk_rq_task_work()
1220 ubq_complete_io_cmd(io, UBLK_IO_RES_OK, issue_flags); in __ublk_rq_task_work()
1224 unsigned issue_flags) in ublk_forward_io_cmds() argument
1231 __ublk_rq_task_work(blk_mq_rq_from_pdu(data), issue_flags); in ublk_forward_io_cmds()
1234 static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd, unsigned issue_flags) in ublk_rq_task_work_cb() argument
1239 ublk_forward_io_cmds(ubq, issue_flags); in ublk_rq_task_work_cb()
1488 unsigned int issue_flags) in ublk_cancel_cmd() argument
[all …]
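
Note: ublk shows the contract from a driver's side: its completion paths never inspect issue_flags themselves, they just thread whatever io_uring handed them through to io_uring_cmd_done(), which may in turn need the conditional ring lock. A stub sketch of that forwarding:

    #include <stdio.h>

    struct io_uring_cmd { int dummy; };    /* stand-in for the real struct */

    /* Stand-in for io_uring_cmd_done(ioucmd, ret, res2, issue_flags). */
    static void uring_cmd_done(struct io_uring_cmd *cmd, int ret,
                               unsigned long long res2, unsigned issue_flags)
    {
            (void)cmd; (void)res2;
            printf("complete: ret=%d issue_flags=%u\n", ret, issue_flags);
    }

    /* Shape of ubq_complete_io_cmd(): pass issue_flags through unchanged. */
    static void complete_io_cmd(struct io_uring_cmd *cmd, int res,
                                unsigned issue_flags)
    {
            uring_cmd_done(cmd, res, 0, issue_flags);
    }

    int main(void)
    {
            struct io_uring_cmd cmd = { 0 };
            complete_io_cmd(&cmd, 0, 0);
            return 0;
    }
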
