/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_IO_URING_CMD_H
#define _LINUX_IO_URING_CMD_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>

/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
#define IORING_URING_CMD_CANCELABLE	(1U << 30)

struct io_uring_cmd {
	struct file	*file;
	const struct io_uring_sqe *sqe;
	/* callback to defer completions to task context */
	void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
	u32		cmd_op;
	u32		flags;
	u8		pdu[32]; /* available inline for free use */
};
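
/*
 * Illustrative sketch (not part of this header): drivers typically overlay a
 * command-private struct on the inline @pdu area instead of allocating it
 * separately. The struct and helper below are hypothetical; the only
 * requirement is that the private data fits within the 32-byte pdu.
 *
 *	struct foo_uring_cmd_pdu {
 *		u32	tag;
 *		int	status;
 *	};
 *
 *	static inline struct foo_uring_cmd_pdu *
 *	foo_uring_cmd_pdu(struct io_uring_cmd *ioucmd)
 *	{
 *		BUILD_BUG_ON(sizeof(struct foo_uring_cmd_pdu) > sizeof(ioucmd->pdu));
 *		return (struct foo_uring_cmd_pdu *)&ioucmd->pdu;
 *	}
 */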

static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
{
	return sqe->cmd;
}
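
/*
 * Illustrative sketch (not part of this header): in a driver's ->uring_cmd()
 * handler, io_uring_sqe_cmd() returns the command payload the submitter put
 * in the SQE. The payload layout (struct foo_cmd) and its fields are defined
 * by the driver's own uAPI and are hypothetical here.
 *
 *	const struct foo_cmd *fc = io_uring_sqe_cmd(ioucmd->sqe);
 *
 *	if (fc->len > FOO_MAX_LEN)
 *		return -EINVAL;
 */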

#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd);

/*
 * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd
 * and the corresponding io_uring request.
 *
 * Note: the caller should never hard code @issue_flags and is only allowed
 * to pass the mask provided by the core io_uring code.
 */
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
			unsigned issue_flags);
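
/*
 * Illustrative sketch (not part of this header): a ->uring_cmd() handler
 * that finishes inline posts the CQE itself via io_uring_cmd_done(),
 * forwarding the @issue_flags it was invoked with, and then returns
 * -EIOCBQUEUED so core io_uring does not complete the request a second
 * time. foo_handle_cmd() is a hypothetical driver helper that returns
 * -EIOCBQUEUED when the command went asynchronous.
 *
 *	static int foo_uring_cmd(struct io_uring_cmd *ioucmd,
 *				 unsigned int issue_flags)
 *	{
 *		int ret = foo_handle_cmd(ioucmd);
 *
 *		if (ret == -EIOCBQUEUED)
 *			return ret;
 *
 *		io_uring_cmd_done(ioucmd, ret, 0, issue_flags);
 *		return -EIOCBQUEUED;
 *	}
 */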

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			    void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			    unsigned flags);

/*
 * Note: the caller should never hard code @issue_flags and only use the
 * mask provided by the core io_uring code.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags);
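
/*
 * Illustrative sketch (not part of this header): a handler that parks a
 * command for a potentially unbounded time (e.g. waiting for device events)
 * can mark it cancelable so io_uring can reap it on cancellation or ring
 * exit. As above, @issue_flags must be the value the handler was invoked
 * with; foo_park_cmd() is hypothetical.
 *
 *	io_uring_cmd_mark_cancelable(ioucmd, issue_flags);
 *	foo_park_cmd(ioucmd);
 *	return -EIOCBQUEUED;
 */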

#else
static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	return -EOPNOTSUPP;
}
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
		ssize_t ret2, unsigned issue_flags)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			    void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			    unsigned flags)
{
}
static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
}
#endif

/*
 * Polled completions must ensure they are coming from a poll queue, and
 * hence are completed inside the usual poll handling loops.
 */
static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
					    ssize_t ret, ssize_t res2)
{
	lockdep_assert(in_task());
	io_uring_cmd_done(ioucmd, ret, res2, 0);
}
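
/*
 * Illustrative sketch (not part of this header): a driver's poll-side
 * completion path, which runs in task context from the iopoll loop, can
 * complete the command with io_uring_cmd_iopoll_done() instead of
 * hard-coding issue_flags for io_uring_cmd_done(). foo_cqe and its result
 * field are hypothetical.
 *
 *	static void foo_complete_polled(struct io_uring_cmd *ioucmd,
 *					struct foo_cqe *cqe)
 *	{
 *		io_uring_cmd_iopoll_done(ioucmd, le32_to_cpu(cqe->result), 0);
 *	}
 */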
82*1afdb760SJens Axboe 
836b04a373SPavel Begunkov /* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
846b04a373SPavel Begunkov static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
856b04a373SPavel Begunkov 			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
866b04a373SPavel Begunkov {
876b04a373SPavel Begunkov 	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
886b04a373SPavel Begunkov }
896b04a373SPavel Begunkov 
906b04a373SPavel Begunkov static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
916b04a373SPavel Begunkov 			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
926b04a373SPavel Begunkov {
936b04a373SPavel Begunkov 	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
946b04a373SPavel Begunkov }
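
/*
 * Illustrative sketch (not part of this header): completions arriving in IRQ
 * context are bounced to the submitting task, where the callback receives an
 * @issue_flags value it may legitimately pass to io_uring_cmd_done(). The
 * pdu helper and its status field are hypothetical (see the pdu sketch
 * above). From the driver's IRQ completion handler:
 *
 *	io_uring_cmd_do_in_task_lazy(ioucmd, foo_uring_task_cb);
 *
 * with the callback later running in task context:
 *
 *	static void foo_uring_task_cb(struct io_uring_cmd *ioucmd,
 *				      unsigned issue_flags)
 *	{
 *		struct foo_uring_cmd_pdu *pdu = foo_uring_cmd_pdu(ioucmd);
 *
 *		io_uring_cmd_done(ioucmd, pdu->status, 0, issue_flags);
 *	}
 */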

static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->task;
}

#endif /* _LINUX_IO_URING_CMD_H */