/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_IO_URING_CMD_H
#define _LINUX_IO_URING_CMD_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>

/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
#define IORING_URING_CMD_CANCELABLE	(1U << 30)

struct io_uring_cmd {
	struct file	*file;
	const struct io_uring_sqe *sqe;
	/* callback to defer completions to task context */
	void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
	u32		cmd_op;
	u32		flags;
	u8		pdu[32]; /* available inline for free use */
};

struct io_uring_cmd_data {
	void			*op_data;
	struct io_uring_sqe	sqes[2];
};

static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
{
	return sqe->cmd;
}

static inline void io_uring_cmd_private_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof_field(struct io_uring_cmd, pdu));
}
#define io_uring_cmd_to_pdu(cmd, pdu_type) ( \
	io_uring_cmd_private_sz_check(sizeof(pdu_type)), \
	((pdu_type *)&(cmd)->pdu) \
)

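/*
 * Example (illustrative sketch, not part of the upstream header): a driver
 * can stash per-command state in the 32-byte pdu area. "foo_cmd_pdu" and
 * "foo_pdu" are hypothetical names; io_uring_cmd_private_sz_check() turns
 * an oversized pdu type into a build failure.
 *
 *	struct foo_cmd_pdu {
 *		u32	tag;
 *		int	status;
 *	};
 *
 *	static struct foo_cmd_pdu *foo_pdu(struct io_uring_cmd *ioucmd)
 *	{
 *		return io_uring_cmd_to_pdu(ioucmd, struct foo_cmd_pdu);
 *	}
 */
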
#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd);

/*
 * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd
 * and the corresponding io_uring request.
 *
 * Note: the caller should never hard code @issue_flags and is only allowed
 * to pass the mask provided by the core io_uring code.
 */
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, u64 res2,
			unsigned issue_flags);

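/*
 * Example (sketch, hypothetical driver code): completing a command from a
 * task-work callback, forwarding the issue_flags mask that io_uring passed
 * in instead of hard coding it.
 *
 *	static void foo_cmd_tw_cb(struct io_uring_cmd *ioucmd,
 *				  unsigned int issue_flags)
 *	{
 *		struct foo_cmd_pdu *pdu =
 *			io_uring_cmd_to_pdu(ioucmd, struct foo_cmd_pdu);
 *
 *		io_uring_cmd_done(ioucmd, pdu->status, 0, issue_flags);
 *	}
 */
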
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			    void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			    unsigned flags);

/*
 * Note: the caller should never hard code @issue_flags and only use the
 * mask provided by the core io_uring code.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags);

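/*
 * Example (sketch, hypothetical ->uring_cmd() handler): a command that is
 * handed off to hardware and completed later is typically marked
 * cancelable before returning -EIOCBQUEUED, again passing through the
 * issue_flags it was given.
 *
 *	static int foo_uring_cmd(struct io_uring_cmd *ioucmd,
 *				 unsigned int issue_flags)
 *	{
 *		io_uring_cmd_mark_cancelable(ioucmd, issue_flags);
 *		... submit to hardware here; it is completed later
 *		    via io_uring_cmd_done() ...
 *		return -EIOCBQUEUED;
 *	}
 */
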
/* Execute the request from a blocking context */
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);

#else
static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	return -EOPNOTSUPP;
}
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
		u64 ret2, unsigned issue_flags)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			    void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			    unsigned flags)
{
}
static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
}
static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
}
#endif

/*
 * Polled completions must ensure they are coming from a poll queue, and
 * hence are completed inside the usual poll handling loops.
 */
static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
					    ssize_t ret, ssize_t res2)
{
	lockdep_assert(in_task());
	io_uring_cmd_done(ioucmd, ret, res2, 0);
}

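/*
 * Example (sketch, hypothetical iopoll path): a driver's poll handler that
 * finds a finished command completes it directly. Passing 0 as issue_flags
 * is only correct here because polled completion runs from the polling
 * task itself, which the lockdep_assert() above checks.
 *
 *	if (foo_hw_cmd_finished(pdu))
 *		io_uring_cmd_iopoll_done(ioucmd, pdu->status, pdu->result);
 */
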
/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}

static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
}

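/*
 * Example (sketch, hypothetical driver code): punting completion from an
 * interrupt handler to the submitting task, where foo_cmd_tw_cb() (as
 * sketched above) then calls io_uring_cmd_done(). Use the _lazy variant
 * only if the IOU_F_TWQ_LAZY_WAKE wakeup semantics are acceptable.
 *
 *	static void foo_irq_complete(struct io_uring_cmd *ioucmd)
 *	{
 *		io_uring_cmd_do_in_task_lazy(ioucmd, foo_cmd_tw_cb);
 *	}
 */
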
static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->tctx->task;
}

static inline struct io_uring_cmd_data *io_uring_cmd_get_async_data(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->async_data;
}

#endif /* _LINUX_IO_URING_CMD_H */