/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _LINUX_IO_URING_CMD_H
#define _LINUX_IO_URING_CMD_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>
#include <linux/blk-mq.h>

/* only the top 8 bits of sqe->uring_cmd_flags are for kernel internal use */
#define IORING_URING_CMD_CANCELABLE	(1U << 30)

struct io_uring_cmd {
	struct file	*file;
	const struct io_uring_sqe *sqe;
	/* callback to defer completions to task context */
	void (*task_work_cb)(struct io_uring_cmd *cmd, unsigned);
	u32		cmd_op;
	u32		flags;
	u8		pdu[32]; /* available inline for free use */
};

struct io_uring_cmd_data {
	void			*op_data;
	struct io_uring_sqe	sqes[2];
};

static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe)
{
	return sqe->cmd;
}

/* Fails the build if a driver's private pdu type outgrows the inline area */
static inline void io_uring_cmd_private_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof_field(struct io_uring_cmd, pdu));
}
#define io_uring_cmd_to_pdu(cmd, pdu_type) ( \
	io_uring_cmd_private_sz_check(sizeof(pdu_type)), \
	((pdu_type *)&(cmd)->pdu) \
)
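
/*
 * Illustrative sketch, not part of this header's API: a driver can overlay
 * its own per-command state on the inline pdu area. The "foo" names below
 * are hypothetical:
 *
 *	struct foo_cmd_pdu {
 *		struct request	*req;
 *		int		status;
 *	};
 *
 *	static inline struct foo_cmd_pdu *foo_pdu(struct io_uring_cmd *ioucmd)
 *	{
 *		return io_uring_cmd_to_pdu(ioucmd, struct foo_cmd_pdu);
 *	}
 *
 * io_uring_cmd_private_sz_check() turns an oversized pdu type into a
 * compile-time error via BUILD_BUG_ON().
 */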

#if defined(CONFIG_IO_URING)
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter,
			      struct io_uring_cmd *ioucmd,
			      unsigned int issue_flags);
int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
				  const struct iovec __user *uvec,
				  size_t uvec_segs,
				  int ddir, struct iov_iter *iter,
				  unsigned issue_flags);

/*
 * Completes the request, i.e. posts an io_uring CQE and deallocates @ioucmd
 * and the corresponding io_uring request.
 *
 * Note: the caller should never hard code @issue_flags and is only allowed
 * to pass the mask provided by the core io_uring code.
 */
void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret, u64 res2,
			unsigned issue_flags);
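
/*
 * Illustrative sketch (hypothetical driver code): completing a command with
 * the issue_flags mask handed over by io_uring core, e.g. at the tail of a
 * task-work callback:
 *
 *	static void foo_cmd_end(struct io_uring_cmd *ioucmd,
 *				unsigned int issue_flags)
 *	{
 *		struct foo_cmd_pdu *pdu = foo_pdu(ioucmd);
 *
 *		io_uring_cmd_done(ioucmd, pdu->status, 0, issue_flags);
 *	}
 */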

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			    void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			    unsigned flags);

/*
 * Note: the caller should never hard code @issue_flags and should only pass
 * the mask provided by the core io_uring code.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags);
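
/*
 * Illustrative sketch (hypothetical): a driver arming a long-lived command
 * would typically mark it cancelable before parking it, so io_uring can
 * cancel it on ring exit:
 *
 *	static int foo_uring_cmd(struct io_uring_cmd *ioucmd,
 *				 unsigned int issue_flags)
 *	{
 *		...
 *		io_uring_cmd_mark_cancelable(ioucmd, issue_flags);
 *		return -EIOCBQUEUED;
 *	}
 */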

/* Execute the request from a blocking context */
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd);

#else
static inline int
io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			  struct iov_iter *iter, struct io_uring_cmd *ioucmd,
			  unsigned int issue_flags)
{
	return -EOPNOTSUPP;
}
static inline int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
						const struct iovec __user *uvec,
						size_t uvec_segs,
						int ddir, struct iov_iter *iter,
						unsigned issue_flags)
{
	return -EOPNOTSUPP;
}
static inline void io_uring_cmd_done(struct io_uring_cmd *cmd, ssize_t ret,
		u64 res2, unsigned issue_flags)
{
}
static inline void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			    void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			    unsigned flags)
{
}
static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
}
static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
}
#endif

/*
 * Polled completions must come from a poll queue, and hence are completed
 * inside the usual poll handling loops.
 */
static inline void io_uring_cmd_iopoll_done(struct io_uring_cmd *ioucmd,
					    ssize_t ret, ssize_t res2)
{
	lockdep_assert(in_task());
	io_uring_cmd_done(ioucmd, ret, res2, 0);
}

/* users must follow the IOU_F_TWQ_LAZY_WAKE semantics */
static inline void io_uring_cmd_do_in_task_lazy(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, IOU_F_TWQ_LAZY_WAKE);
}

static inline void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	__io_uring_cmd_do_in_task(ioucmd, task_work_cb, 0);
}
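
/*
 * Illustrative sketch (hypothetical): an interrupt-context completion handler
 * defers CQE posting to task context, where the callback receives the
 * issue_flags mask from io_uring core:
 *
 *	static void foo_irq_complete(struct io_uring_cmd *ioucmd)
 *	{
 *		io_uring_cmd_complete_in_task(ioucmd, foo_cmd_end);
 *	}
 *
 * The _lazy variant batches task wakeups (IOU_F_TWQ_LAZY_WAKE); it is only
 * appropriate when the callback posts a single CQE.
 */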

static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->tctx->task;
}

static inline struct io_uring_cmd_data *io_uring_cmd_get_async_data(struct io_uring_cmd *cmd)
{
	return cmd_to_io_kiocb(cmd)->async_data;
}
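
/*
 * Illustrative sketch (assumption: the command has gone async, so async data
 * has been allocated): op-private state can be stashed in op_data alongside
 * the stable SQE copy in sqes[]:
 *
 *	struct io_uring_cmd_data *data = io_uring_cmd_get_async_data(ioucmd);
 *
 *	data->op_data = foo_state;
 */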

/*
 * Register a request's bvec pages as a fixed buffer at @index; @release is
 * called once the last user of the registered buffer is done with it.
 */
int io_buffer_register_bvec(struct io_uring_cmd *cmd, struct request *rq,
			    void (*release)(void *), unsigned int index,
			    unsigned int issue_flags);
int io_buffer_unregister_bvec(struct io_uring_cmd *cmd, unsigned int index,
			      unsigned int issue_flags);
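
/*
 * Illustrative sketch (hypothetical zero-copy flow): expose a block request's
 * pages as a fixed buffer for the data phase, then drop the registration:
 *
 *	ret = io_buffer_register_bvec(ioucmd, rq, foo_release, index,
 *				      issue_flags);
 *	if (ret)
 *		return ret;
 *	...
 *	io_buffer_unregister_bvec(ioucmd, index, issue_flags);
 */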

#endif /* _LINUX_IO_URING_CMD_H */