/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_IO_URING_CMD_H
#define _LINUX_IO_URING_CMD_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <linux/blk-mq.h>

/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
#define IORING_URING_CMD_CANCELABLE	(1U << 30)

struct io_uring_cmd {
	struct file	*file;
	const struct io_uring_sqe *sqe;
	/* callback to defer completions to task context */
	void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
	u32		cmd_op;
	u32		flags;
	u8		pdu[32]; /* available inline for free use */
};

struct io_uring_cmd_data {
	void	*op_data;
};

static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
{
	return sqe->cmd;
}

static inline void io_uring_cmd_private_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof_field(struct io_uring_cmd, pdu));
}
#define io_uring_cmd_to_pdu(cmd, pdu_type) ( \
	io_uring_cmd_private_sz_check(sizeof(pdu_type)), \
	((pdu_type *)&(cmd)->pdu) \
)
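
/*
 * Example (hypothetical driver code, not part of this header): stash
 * per-command driver state in the 32-byte inline pdu area. "struct
 * foo_cmd_pdu" and foo_prep() below are made-up names for illustration;
 * the BUILD_BUG_ON() above rejects any pdu type larger than 32 bytes.
 *
 *	struct foo_cmd_pdu {
 *		u32	tag;
 *		int	status;
 *	};
 *
 *	static void foo_prep(struct io_uring_cmd *cmd)
 *	{
 *		struct foo_cmd_pdu *pdu =
 *			io_uring_cmd_to_pdu(cmd, struct foo_cmd_pdu);
 *
 *		pdu->tag = 0;
 *		pdu->status = -EINPROGRESS;
 *	}
 */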

#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter,
			      struct io_uring_cmd *ioucmd,
			      unsigned int issue_flags);
int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
				  const struct iovec __user *uvec,
				  size_t uvec_segs,
				  int ddir, struct iov_iter *iter,
				  unsigned issue_flags);
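
/*
 * Sketch (hypothetical ->uring_cmd() caller): import a registered buffer
 * described by ubuf/len from the SQE into an iov_iter before starting the
 * actual transfer. ubuf, len and ioucmd are assumed local names, not
 * defined by this header.
 *
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = io_uring_cmd_import_fixed(ubuf, len, WRITE, &iter, ioucmd,
 *					issue_flags);
 *	if (ret < 0)
 *		return ret;
 */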

/*
 * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd
 * and the corresponding io_uring request.
 *
 * Note: the caller should never hard code @issue_flags and is only allowed
 * to pass the mask provided by the core io_uring code.
 */
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, u64 res2,
		       unsigned issue_flags);
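
/*
 * Example (hypothetical task_work callback, in the style of the nvme and
 * ublk users): complete the command from task context, passing through the
 * issue_flags mask the core handed to the callback, never a hard coded one.
 *
 *	static void foo_task_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
 *	{
 *		struct foo_cmd_pdu *pdu =
 *			io_uring_cmd_to_pdu(cmd, struct foo_cmd_pdu);
 *
 *		io_uring_cmd_done(cmd, pdu->status, 0, issue_flags);
 *	}
 */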

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			       void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			       unsigned flags);

/*
 * Note: the caller should never hard code @issue_flags and only use the
 * mask provided by the core io_uring code.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
				  unsigned int issue_flags);
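
/*
 * Sketch (hypothetical ->uring_cmd() handler): a command that may stay
 * pending indefinitely is marked cancelable at issue time, so that ring
 * teardown can cancel it instead of hanging. foo_queue_cmd() is an
 * assumed driver helper.
 *
 *	io_uring_cmd_mark_cancelable(cmd, issue_flags);
 *	foo_queue_cmd(fd, cmd);
 *	return -EIOCBQUEUED;
 */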

/* Execute the request from a blocking context */
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);

#else
static inline int
io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			  struct iov_iter *iter, struct io_uring_cmd *ioucmd,
			  unsigned int issue_flags)
{
	return -EOPNOTSUPP;
}
static inline int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
						const struct iovec __user *uvec,
						size_t uvec_segs,
						int ddir, struct iov_iter *iter,
						unsigned issue_flags)
{
	return -EOPNOTSUPP;
}
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
				     u64 res2, unsigned issue_flags)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
}
static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
						unsigned int issue_flags)
{
}
static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
}
#endif

/*
 * Polled completions must ensure they are coming from a poll queue, and
 * hence are completed inside the usual poll handling loops.
 */
static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
					    ssize_t ret, ssize_t res2)
{
	lockdep_assert(in_task());
	io_uring_cmd_done(ioucmd, ret, res2, 0);
}
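
/*
 * Sketch (hypothetical completion path, mirroring the nvme passthrough
 * pattern): polled requests are reaped from the poll loop in task context
 * and can be completed directly, while IRQ driven ones are deferred.
 * blk_rq_is_poll() comes from <linux/blk-mq.h>; req, ioucmd, ret, res2
 * and foo_task_cb() are assumed local names.
 *
 *	if (blk_rq_is_poll(req))
 *		io_uring_cmd_iopoll_done(ioucmd, ret, res2);
 *	else
 *		io_uring_cmd_do_in_task_lazy(ioucmd, foo_task_cb);
 */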

/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}

static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
}
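
/*
 * Example (hypothetical IRQ-time completion): stash the result in the pdu
 * and defer the CQE posting to task context. The lazy variant may batch
 * the task wakeup per the IOU_F_TWQ_LAZY_WAKE semantics noted above; use
 * io_uring_cmd_complete_in_task() when an immediate wakeup is needed.
 *
 *	static void foo_irq_done(struct io_uring_cmd *cmd, int result)
 *	{
 *		struct foo_cmd_pdu *pdu =
 *			io_uring_cmd_to_pdu(cmd, struct foo_cmd_pdu);
 *
 *		pdu->status = result;
 *		io_uring_cmd_do_in_task_lazy(cmd, foo_task_cb);
 *	}
 */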

static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->tctx->task;
}

static inline struct io_uring_cmd_data *io_uring_cmd_get_async_data(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->async_data;
}

int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
			    void (*release)(void *), unsigned int index,
			    unsigned int issue_flags);
int io_buffer_unregister_bvec(struct io_uring_cmd *cmd, unsigned int index,
			      unsigned int issue_flags);
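
/*
 * Sketch (hypothetical zero-copy flow, in the style of ublk): publish a
 * block request's bvec pages as fixed buffer @index in the ring's buffer
 * table for the data phase, then drop them again; @release is invoked once
 * the last buffer reference is gone. foo_release(), rq and index are
 * assumed local names.
 *
 *	ret = io_buffer_register_bvec(cmd, rq, foo_release, index,
 *				      issue_flags);
 *	if (ret)
 *		return ret;
 *	...data phase using the fixed buffer...
 *	io_buffer_unregister_bvec(cmd, index, issue_flags);
 */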

#endif /* _LINUX_IO_URING_CMD_H */