/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_IO_URING_CMD_H
#define _LINUX_IO_URING_CMD_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>

/* the top 8 bits of sqe->uring_cmd_flags are reserved for kernel-internal use */
#define IORING_URING_CMD_CANCELABLE	(1U << 30)

struct io_uring_cmd {
	struct file	*file;
	const struct io_uring_sqe *sqe;
	/* callback to defer completions to task context */
	void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
	u32		cmd_op;		/* command opcode, taken from sqe->cmd_op */
	u32		flags;		/* IORING_URING_CMD_* flags */
	u8		pdu[32];	/* available inline for free use */
};

static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
{
	return sqe->cmd;
}

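/*
 * Example (illustrative sketch, not part of this header): a driver's
 * ->uring_cmd() handler reads its device-specific command layout from the
 * SQE command area and dispatches on cmd_op.  "struct foo_cmd", FOO_CMD_READ
 * and foo_do_read() are hypothetical; the layout must fit in the SQE cmd
 * area (16 bytes, or 80 with IORING_SETUP_SQE128).
 *
 *	struct foo_cmd {
 *		__u64	addr;
 *		__u32	len;
 *		__u32	flags;
 *	};
 *
 *	static int foo_uring_cmd(struct io_uring_cmd *ioucmd,
 *				 unsigned int issue_flags)
 *	{
 *		const struct foo_cmd *fc = io_uring_sqe_cmd(ioucmd->sqe);
 *
 *		switch (ioucmd->cmd_op) {
 *		case FOO_CMD_READ:
 *			return foo_do_read(ioucmd, fc->addr, fc->len, issue_flags);
 *		default:
 *			return -ENOTTY;
 *		}
 *	}
 */
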
static inline void io_uring_cmd_private_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof_field(struct io_uring_cmd, pdu));
}
/* cast the inline pdu storage to the command's own pdu type, size-checked */
#define io_uring_cmd_to_pdu(cmd, pdu_type) ( \
	io_uring_cmd_private_sz_check(sizeof(pdu_type)), \
	((pdu_type *)&(cmd)->pdu) \
)

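/*
 * Example (illustrative sketch): per-command driver state can live in the
 * 32-byte pdu area instead of a separate allocation.  "struct foo_pdu" is a
 * hypothetical type; io_uring_cmd_private_sz_check() turns an oversized type
 * into a build failure.
 *
 *	struct foo_pdu {
 *		struct request	*rq;
 *		u32		status;
 *	};
 *
 *	static struct foo_pdu *foo_pdu(struct io_uring_cmd *ioucmd)
 *	{
 *		return io_uring_cmd_to_pdu(ioucmd, struct foo_pdu);
 *	}
 */
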
#if defined(CONFIG_IO_URING)
/* build @iter over the registered (fixed) buffer at @ubuf/@len for @ioucmd */
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd);

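/*
 * Example (illustrative sketch): when the submitter passed
 * IORING_URING_CMD_FIXED, the user address in the command refers to a
 * registered buffer, and the handler imports it into an iov_iter before
 * doing I/O.  The direction argument follows the READ/WRITE convention of
 * the underlying iov_iter import; ubuf/len come from the driver's own
 * command layout.
 *
 *	struct iov_iter iter;
 *	int ret;
 *
 *	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
 *		ret = io_uring_cmd_import_fixed(ubuf, len, WRITE, &iter, ioucmd);
 *		if (ret)
 *			return ret;
 *	}
 */
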
/*
 * Completes the request, i.e. posts an io_uring CQE and deallocates @cmd
 * and the corresponding io_uring request.
 *
 * Note: the caller should never hard code @issue_flags and is only allowed
 * to pass the mask provided by the core io_uring code.
 */
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, u64 res2,
			unsigned issue_flags);

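/*
 * Example (illustrative sketch): a handler that cannot finish inline
 * typically returns -EIOCBQUEUED from ->uring_cmd() and posts the CQE later
 * from its completion path.  The issue_flags passed here must be the ones
 * handed to the driver by io_uring, never a constant the driver makes up.
 * foo_cmd_end() is a hypothetical helper.
 *
 *	static void foo_cmd_end(struct io_uring_cmd *ioucmd, int err, u64 extra,
 *				unsigned int issue_flags)
 *	{
 *		io_uring_cmd_done(ioucmd, err, extra, issue_flags);
 *	}
 */
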
/* queue @task_work_cb to run from task context via io_uring's task_work */
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			    void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			    unsigned flags);

/*
 * Note: the caller should never hard code @issue_flags and should only pass
 * the mask provided by the core io_uring code.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags);

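/*
 * Example (illustrative sketch): a command that may stay pending
 * indefinitely (e.g. waiting for device events) is marked cancelable during
 * ->uring_cmd(), so io_uring can find it on ring exit; cancelation is then
 * delivered as another ->uring_cmd() invocation with IO_URING_F_CANCEL set
 * in issue_flags.
 *
 *	io_uring_cmd_mark_cancelable(ioucmd, issue_flags);
 *	return -EIOCBQUEUED;
 */
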
/* Execute the request from a blocking context */
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);

#else
static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	return -EOPNOTSUPP;
}
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
		u64 res2, unsigned issue_flags)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			    void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			    unsigned flags)
{
}
static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
}
static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
}
#endif

/*
 * Polled completions must come from a polled (IOPOLL) queue, and hence are
 * completed inside the usual poll handling loops, i.e. from task context.
 */
static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
					    ssize_t ret, ssize_t res2)
{
	lockdep_assert(in_task());
	io_uring_cmd_done(ioucmd, ret, res2, 0);
}

/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}

static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
}

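/*
 * Example (illustrative sketch): completions often arrive in IRQ or driver
 * thread context, where the final CQE must not be posted directly.  The
 * usual pattern is to stash the result in the pdu and bounce completion to
 * task context; the callback receives the issue_flags to forward to
 * io_uring_cmd_done().  Names and the foo_pdu layout are hypothetical.
 *
 *	static void foo_cmd_tw_cb(struct io_uring_cmd *ioucmd,
 *				  unsigned int issue_flags)
 *	{
 *		struct foo_pdu *pdu = io_uring_cmd_to_pdu(ioucmd, struct foo_pdu);
 *
 *		io_uring_cmd_done(ioucmd, pdu->status, 0, issue_flags);
 *	}
 *
 *	static void foo_irq_complete(struct io_uring_cmd *ioucmd, int status)
 *	{
 *		struct foo_pdu *pdu = io_uring_cmd_to_pdu(ioucmd, struct foo_pdu);
 *
 *		pdu->status = status;
 *		io_uring_cmd_complete_in_task(ioucmd, foo_cmd_tw_cb);
 *	}
 */
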
static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->tctx->task;
}

#endif /* _LINUX_IO_URING_CMD_H */