xref: /linux/io_uring/uring_cmd.c (revision 547c5775a742d9c83891b629b75d1d4c8e88d8c0)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring/cmd.h>
#include <linux/security.h>
#include <linux/nospec.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "uring_cmd.h"

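/* Free callback for the ctx cmd cache: release a cached io_async_cmd. */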
void io_cmd_cache_free(const void *entry)
{
	struct io_async_cmd *ac = (struct io_async_cmd *)entry;

	io_vec_free(&ac->vec);
	kfree(ac);
}

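/*
 * Free any opcode-private data and, if the ring is locked, recycle the
 * async data back into the ctx-wide cmd cache.
 */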
static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_async_cmd *ac = req->async_data;
	struct io_uring_cmd_data *cache = &ac->data;

	if (cache->op_data) {
		kfree(cache->op_data);
		cache->op_data = NULL;
	}

	if (issue_flags & IO_URING_F_UNLOCKED)
		return;

	io_alloc_cache_vec_kasan(&ac->vec);
	if (ac->vec.nr > IO_VEC_CACHE_SOFT_CAP)
		io_vec_free(&ac->vec);

	if (io_alloc_cache_put(&req->ctx->cmd_cache, cache)) {
		ioucmd->sqe = NULL;
		req->async_data = NULL;
		req->flags &= ~(REQ_F_ASYNC_DATA|REQ_F_NEED_CLEANUP);
	}
}

void io_uring_cmd_cleanup(struct io_kiocb *req)
{
	io_req_uring_cleanup(req, 0);
}

bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
				   struct io_uring_task *tctx, bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool ret = false;

	lockdep_assert_held(&ctx->uring_lock);

	hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
			hash_node) {
		struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
				struct io_uring_cmd);
		struct file *file = req->file;

		if (!cancel_all && req->tctx != tctx)
			continue;

		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
						   IO_URING_F_COMPLETE_DEFER);
			ret = true;
		}
	}
	io_submit_flush_completions(ctx);
	return ret;
}

static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
	io_ring_submit_lock(ctx, issue_flags);
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}

/*
 * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
 * will try to cancel this issued command by sending ->uring_cmd() with
 * issue_flags of IO_URING_F_CANCEL.
 *
 * The command is guaranteed to not be done when calling ->uring_cmd()
 * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
 * with the race between io_uring cancellation and normal completion.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
		cmd->flags |= IORING_URING_CMD_CANCELABLE;
		io_ring_submit_lock(ctx, issue_flags);
		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
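
/*
 * Illustrative sketch (not from this file): a driver's ->uring_cmd()
 * handler typically marks a long-running command cancelable when it is
 * issued, and aborts it when re-invoked with IO_URING_F_CANCEL. The
 * my_dev_*() helpers are hypothetical.
 *
 *	static int my_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 *	{
 *		if (issue_flags & IO_URING_F_CANCEL) {
 *			my_dev_abort(cmd);
 *			return 0;
 *		}
 *		io_uring_cmd_mark_cancelable(cmd, issue_flags);
 *		my_dev_queue(cmd);
 *		return -EIOCBQUEUED;
 *	}
 *
 * The abort path must still complete the command exactly once via
 * io_uring_cmd_done(), racing against normal completion as noted above.
 */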

static void io_uring_cmd_work(struct io_kiocb *req, io_tw_token_t tw)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned int flags = IO_URING_F_COMPLETE_DEFER;

	if (io_should_terminate_tw())
		flags |= IO_URING_F_TASK_DEAD;

	/* task_work executor checks the deferred list completion */
	ioucmd->task_work_cb(ioucmd, flags);
}

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);
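
/*
 * Illustrative sketch (not from this file): completions arriving in atomic
 * context (e.g. an IRQ handler) cannot call io_uring_cmd_done() directly,
 * so they bounce to task context first. io_uring_cmd_complete_in_task() is
 * the wrapper from <linux/io_uring/cmd.h>; my_dev_irq() and my_result()
 * are hypothetical.
 *
 *	static void my_cmd_tw_cb(struct io_uring_cmd *cmd, unsigned int issue_flags)
 *	{
 *		io_uring_cmd_done(cmd, my_result(cmd), 0, issue_flags);
 *	}
 *
 *	static irqreturn_t my_dev_irq(int irq, void *data)
 *	{
 *		struct io_uring_cmd *cmd = data;
 *
 *		io_uring_cmd_complete_in_task(cmd, my_cmd_tw_cb);
 *		return IRQ_HANDLED;
 *	}
 */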

static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_uring_cmd_del_cancelable(ioucmd, issue_flags);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	io_req_uring_cleanup(req, issue_flags);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
		if (WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED))
			return;
		io_req_complete_defer(req);
	} else {
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
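
/*
 * Note that res2 only reaches userspace on rings created with
 * IORING_SETUP_CQE32: big-CQE users such as NVMe passthrough return a
 * second result word through it, while on normal rings it is dropped.
 */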

static int io_uring_cmd_prep_setup(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_async_cmd *ac;

	/* see io_uring_cmd_get_async_data() */
	BUILD_BUG_ON(offsetof(struct io_async_cmd, data) != 0);

	ac = io_uring_alloc_async_data(&req->ctx->cmd_cache, req);
	if (!ac)
		return -ENOMEM;
	ac->data.op_data = NULL;

	/*
	 * Unconditionally cache the SQE for now - this is only needed for
	 * requests that go async, but prep handlers must ensure that any
	 * SQE data is stable beyond prep. Since uring_cmd is special in
	 * that it doesn't read in per-op data, play it safe and copy the
	 * whole SQE here. This can later get relaxed.
	 */
	memcpy(ac->sqes, sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = ac->sqes;
	return 0;
}

int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED)
		req->buf_index = READ_ONCE(sqe->buf_index);

	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);

	return io_uring_cmd_prep_setup(req, sqe);
}

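/*
 * Illustrative sketch (not from this file): after prep, a ->uring_cmd()
 * implementation reads its opcode-specific payload from the now-stable
 * SQE copy via io_uring_sqe_cmd(). struct my_cmd_payload and MY_CMD_READ
 * are hypothetical; the payload area is 16 bytes on normal rings and 80
 * bytes with IORING_SETUP_SQE128.
 *
 *	static int my_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
 *	{
 *		const struct my_cmd_payload *p = io_uring_sqe_cmd(cmd->sqe);
 *
 *		switch (cmd->cmd_op) {
 *		case MY_CMD_READ:
 *			return my_do_read(cmd, p, issue_flags);
 *		default:
 *			return -ENOTTY;
 *		}
 *	}
 */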
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (io_is_compat(ctx))
		issue_flags |= IO_URING_F_COMPAT;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
		if (ctx->flags & IORING_SETUP_HYBRID_IOPOLL) {
			/* make sure every req only blocks once */
			req->flags &= ~REQ_F_IOPOLL_STATE;
			req->iopoll_start = ktime_get_ns();
		}
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN || ret == -EIOCBQUEUED)
		return ret;
	if (ret < 0)
		req_set_fail(req);
	io_req_uring_cleanup(req, issue_flags);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

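/*
 * Illustrative sketch (not from this file): with liburing, userspace
 * submits a uring_cmd by filling the raw SQE fields consumed above.
 * MY_CMD_OP and the payload layout are defined by the target driver;
 * dev_fd is its open fd.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode = IORING_OP_URING_CMD;
 *	sqe->fd = dev_fd;
 *	sqe->cmd_op = MY_CMD_OP;
 *	memcpy(sqe->cmd, &payload, sizeof(payload));
 *	io_uring_submit(&ring);
 */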
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter,
			      struct io_uring_cmd *ioucmd,
			      unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	if (WARN_ON_ONCE(!(ioucmd->flags & IORING_URING_CMD_FIXED)))
		return -EINVAL;

	return io_import_reg_buf(req, iter, ubuf, len, rw, issue_flags);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
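
/*
 * Illustrative sketch (not from this file): a driver resolving a
 * registered buffer for an IORING_URING_CMD_FIXED command. The addr/len
 * fields come from the driver's own SQE payload and are hypothetical.
 *
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = io_uring_cmd_import_fixed(p->addr, p->len, ITER_DEST, &iter,
 *					cmd, issue_flags);
 *	if (ret < 0)
 *		return ret;
 *
 * On success the iterator walks the pinned pages of the buffer that
 * userspace registered and selected via sqe->buf_index.
 */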

int io_uring_cmd_import_fixed_vec(struct io_uring_cmd *ioucmd,
				  const struct iovec __user *uvec,
				  size_t uvec_segs,
				  int ddir, struct iov_iter *iter,
				  unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
	struct io_async_cmd *ac = req->async_data;
	int ret;

	if (WARN_ON_ONCE(!(ioucmd->flags & IORING_URING_CMD_FIXED)))
		return -EINVAL;

	ret = io_prep_reg_iovec(req, &ac->vec, uvec, uvec_segs);
	if (ret)
		return ret;

	return io_import_reg_vec(ddir, iter, req, &ac->vec, uvec_segs,
				 issue_flags);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed_vec);

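/*
 * Punt the command to io-wq so it can be issued from a context that is
 * allowed to block.
 */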
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_req_queue_iowq(req);
}
313