/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_IO_URING_CMD_H
#define _LINUX_IO_URING_CMD_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>

/* the top 8 bits of sqe->uring_cmd_flags are reserved for kernel internal use */
#define IORING_URING_CMD_CANCELABLE	(1U << 30)

struct io_uring_cmd {
	struct file	*file;
	const struct io_uring_sqe *sqe;
	/* callback to defer completions to task context */
	void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned issue_flags);
	u32		cmd_op;
	u32		flags;
	u8		pdu[32]; /* available inline for free use */
};

static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
{
	return sqe->cmd;
}

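/*
 * Illustrative sketch (not part of this header): a driver's ->uring_cmd()
 * handler typically reads its command payload via io_uring_sqe_cmd() and
 * keeps per-command state in the inline pdu[] area.  The my_driver_* names
 * below are hypothetical.
 *
 *	struct my_driver_pdu {
 *		struct request *req;
 *	};
 *
 *	static int my_driver_uring_cmd(struct io_uring_cmd *ioucmd,
 *				       unsigned int issue_flags)
 *	{
 *		const struct my_driver_sqe_cmd *cmd = io_uring_sqe_cmd(ioucmd->sqe);
 *		struct my_driver_pdu *pdu = (struct my_driver_pdu *)ioucmd->pdu;
 *
 *		BUILD_BUG_ON(sizeof(*pdu) > sizeof(ioucmd->pdu));
 *		...
 *		return -EIOCBQUEUED;
 *	}
 */
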
#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd);

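/*
 * Illustrative sketch (not part of this header): importing a registered
 * ("fixed") user buffer into an iov_iter before mapping it for I/O.
 * @ubuf and @len would come from the driver's SQE payload; READ/WRITE
 * selects the data direction.  On success, @iter describes the pinned
 * buffer and can be mapped for the request, e.g. with
 * blk_rq_map_user_iov().
 *
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = io_uring_cmd_import_fixed(ubuf, len, WRITE, &iter, ioucmd);
 *	if (ret < 0)
 *		return ret;
 */
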
/*
 * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd
 * and the corresponding io_uring request.
 *
 * Note: the caller should never hard-code @issue_flags and is only allowed
 * to pass the mask provided by the core io_uring code.
 */
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, ssize_t res2,
			unsigned issue_flags);

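/*
 * Illustrative sketch (not part of this header): completing a command from
 * a driver callback.  @issue_flags must be the value that io_uring handed
 * to the driver, never a hard-coded constant.  my_driver_complete() is a
 * hypothetical helper.
 *
 *	static void my_driver_complete(struct io_uring_cmd *ioucmd, int err,
 *				       u64 result, unsigned int issue_flags)
 *	{
 *		io_uring_cmd_done(ioucmd, err, result, issue_flags);
 *	}
 */
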
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			    void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			    unsigned flags);

/*
 * Note: the caller should never hard-code @issue_flags and should only pass
 * the mask provided by the core io_uring code.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags);

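/*
 * Illustrative sketch (not part of this header): a command that may stay
 * in flight indefinitely can be marked cancelable while it is queued, so
 * io_uring can find and cancel it when the ring exits.  The my_driver_*
 * names are hypothetical.
 *
 *	static int my_driver_uring_cmd(struct io_uring_cmd *ioucmd,
 *				       unsigned int issue_flags)
 *	{
 *		io_uring_cmd_mark_cancelable(ioucmd, issue_flags);
 *		... hand the command to hardware ...
 *		return -EIOCBQUEUED;
 *	}
 */
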
#else
static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	return -EOPNOTSUPP;
}
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
		ssize_t res2, unsigned issue_flags)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			    void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			    unsigned flags)
{
}
static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
}
#endif

/*
 * The caller must ensure polled completions come from a poll queue, and
 * hence are completed inside the usual poll handling loops.
 */
static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
					    ssize_t ret, ssize_t res2)
{
	lockdep_assert(in_task());
	io_uring_cmd_done(ioucmd, ret, res2, 0);
}

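/*
 * Illustrative sketch (not part of this header): a driver's
 * ->uring_cmd_iopoll() implementation reaps completions from its poll
 * queue and finishes each command with io_uring_cmd_iopoll_done().
 * my_driver_reap() is a hypothetical helper returning true once the
 * command has completed.
 *
 *	static int my_driver_uring_cmd_iopoll(struct io_uring_cmd *ioucmd,
 *					      struct io_comp_batch *iob,
 *					      unsigned int poll_flags)
 *	{
 *		if (!my_driver_reap(ioucmd))
 *			return 0;
 *		io_uring_cmd_iopoll_done(ioucmd, 0, 0);
 *		return 1;
 *	}
 */
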
/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}

static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
}

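/*
 * Illustrative sketch (not part of this header): deferring completion from
 * hard-irq context to task context.  The irq handler schedules the
 * callback; the callback later runs in task context with valid
 * @issue_flags and may post the CQE.  The my_driver_* names are
 * hypothetical.
 *
 *	static void my_driver_cmd_tw(struct io_uring_cmd *ioucmd,
 *				     unsigned int issue_flags)
 *	{
 *		io_uring_cmd_done(ioucmd, 0, 0, issue_flags);
 *	}
 *
 *	static irqreturn_t my_driver_irq(int irq, void *data)
 *	{
 *		struct io_uring_cmd *ioucmd = data;
 *
 *		io_uring_cmd_do_in_task_lazy(ioucmd, my_driver_cmd_tw);
 *		return IRQ_HANDLED;
 *	}
 */
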
static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->task;
}

#endif /* _LINUX_IO_URING_CMD_H */