/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_IO_URING_CMD_H
#define _LINUX_IO_URING_CMD_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>

/* only the top 8 bits of sqe->uring_cmd_flags are for kernel internal use */
#define IORING_URING_CMD_CANCELABLE	(1U << 30)

struct io_uring_cmd {
	struct file	*file;
	const struct io_uring_sqe *sqe;
	/* callback to defer completions to task context */
	void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
	u32		cmd_op;
	u32		flags;
	u8		pdu[32]; /* inline storage, free for driver use */
};

static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
{
	return sqe->cmd;
}
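
/*
 * Example (illustrative sketch, not part of this API): a driver's
 * ->uring_cmd() handler typically reads its private command payload from
 * the SQE via io_uring_sqe_cmd() and keeps per-command state in ->pdu.
 * "struct foo_sqe_payload", "struct foo_cmd" and "foo_uring_cmd" are
 * hypothetical names.
 *
 *	struct foo_sqe_payload {
 *		__u64	addr;
 *		__u32	len;
 *	};
 *
 *	struct foo_cmd {
 *		u64	addr;
 *		u32	len;
 *	};
 *
 *	static int foo_uring_cmd(struct io_uring_cmd *ioucmd,
 *				 unsigned int issue_flags)
 *	{
 *		const struct foo_sqe_payload *p = io_uring_sqe_cmd(ioucmd->sqe);
 *		struct foo_cmd *fc = (struct foo_cmd *)ioucmd->pdu;
 *
 *		BUILD_BUG_ON(sizeof(*fc) > sizeof(ioucmd->pdu));
 *		fc->addr = READ_ONCE(p->addr);
 *		fc->len = READ_ONCE(p->len);
 *		return -EIOCBQUEUED;
 *	}
 */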

#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd);
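
/*
 * Example (sketch): importing a registered (fixed) buffer described by the
 * driver's SQE payload into an iov_iter before doing I/O. "p" is the
 * hypothetical payload from the example above; ITER_DEST (== READ) is
 * assumed to suit a command that fills the user buffer with data.
 *
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = io_uring_cmd_import_fixed(p->addr, p->len, ITER_DEST,
 *					&iter, ioucmd);
 *	if (ret)
 *		return ret;
 */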

/*
 * Completes the request, i.e. posts an io_uring CQE and deallocates @cmd
 * and the corresponding io_uring request.
 *
 * Note: the caller must never hardcode @issue_flags and may only pass on
 * the mask provided by the core io_uring code.
 */
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
			unsigned issue_flags);
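
/*
 * Example (sketch): completing a command synchronously from the issue path.
 * @issue_flags is forwarded verbatim from the ->uring_cmd() handler;
 * "foo_do_sync_cmd" is a hypothetical helper. Returning -EIOCBQUEUED tells
 * core io_uring that the completion has already been posted.
 *
 *	static int foo_uring_cmd(struct io_uring_cmd *ioucmd,
 *				 unsigned int issue_flags)
 *	{
 *		ssize_t ret = foo_do_sync_cmd(ioucmd);
 *
 *		io_uring_cmd_done(ioucmd, ret, 0, issue_flags);
 *		return -EIOCBQUEUED;
 *	}
 */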

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			    void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			    unsigned flags);

/*
 * Note: the caller must never hardcode @issue_flags and may only pass on
 * the mask provided by the core io_uring code.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags);
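
/*
 * Example (sketch): a long-running command marking itself cancelable at
 * issue time, again forwarding the handler's @issue_flags.
 * "foo_queue_cmd" is a hypothetical helper that arranges for a later
 * io_uring_cmd_done().
 *
 *	static int foo_uring_cmd(struct io_uring_cmd *ioucmd,
 *				 unsigned int issue_flags)
 *	{
 *		io_uring_cmd_mark_cancelable(ioucmd, issue_flags);
 *		foo_queue_cmd(ioucmd);
 *		return -EIOCBQUEUED;
 *	}
 */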

/* Execute the request from a blocking context */
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);

#else
static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	return -EOPNOTSUPP;
}
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
		ssize_t res2, unsigned issue_flags)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			    void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			    unsigned flags)
{
}
static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
}
static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
}
#endif

/*
 * Polled completions must come from a poll queue, and hence are completed
 * from inside the usual poll handling loops.
 */
static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
					    ssize_t ret, ssize_t res2)
{
	lockdep_assert(in_task());
	io_uring_cmd_done(ioucmd, ret, res2, 0);
}

/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}

static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
}
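
/*
 * Example (sketch): deferring completion from hard interrupt context to
 * task context. The callback receives an issue_flags mask from core
 * io_uring to forward to io_uring_cmd_done(). "foo_irq_handler" and the
 * result field stashed in ->pdu are hypothetical.
 *
 *	static void foo_cmd_tw(struct io_uring_cmd *ioucmd, unsigned issue_flags)
 *	{
 *		struct foo_cmd *fc = (struct foo_cmd *)ioucmd->pdu;
 *
 *		io_uring_cmd_done(ioucmd, fc->result, 0, issue_flags);
 *	}
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		struct io_uring_cmd *ioucmd = data;
 *
 *		io_uring_cmd_complete_in_task(ioucmd, foo_cmd_tw);
 *		return IRQ_HANDLED;
 *	}
 */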

static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->task;
}

#endif /* _LINUX_IO_URING_CMD_H */