// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring/cmd.h>
#include <linux/io_uring/net.h>
#include <linux/security.h>
#include <linux/nospec.h>
#include <net/sock.h>

#include <uapi/linux/io_uring.h>
#include <asm/ioctls.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "uring_cmd.h"

static struct uring_cache *io_uring_async_get(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct uring_cache *cache;

	cache = io_alloc_cache_get(&ctx->uring_cache);
	if (cache) {
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = cache;
		return cache;
	}
	if (!io_alloc_async_data(req))
		return req->async_data;
	return NULL;
}

static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct uring_cache *cache = req->async_data;

	if (issue_flags & IO_URING_F_UNLOCKED)
		return;
	if (io_alloc_cache_put(&req->ctx->uring_cache, cache)) {
		ioucmd->sqe = NULL;
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
				   struct io_uring_task *tctx, bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool ret = false;

	lockdep_assert_held(&ctx->uring_lock);

	hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
			hash_node) {
		struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
				struct io_uring_cmd);
		struct file *file = req->file;

		if (!cancel_all && req->tctx != tctx)
			continue;

		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
			/* ->sqe isn't available if no async data */
			if (!req_has_async_data(req))
				cmd->sqe = NULL;
			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
						   IO_URING_F_COMPLETE_DEFER);
			ret = true;
		}
	}
	io_submit_flush_completions(ctx);
	return ret;
}

static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
	io_ring_submit_lock(ctx, issue_flags);
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}

/*
 * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
 * will try to cancel this issued command by calling ->uring_cmd() with
 * IO_URING_F_CANCEL in issue_flags.
 *
 * The command is guaranteed not to be done when ->uring_cmd() is called
 * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
 * with the race between io_uring cancellation and normal completion.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
		cmd->flags |= IORING_URING_CMD_CANCELABLE;
		io_ring_submit_lock(ctx, issue_flags);
		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
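
/*
 * Example (illustrative sketch, not part of this file): a driver whose
 * command stays in flight until hardware completes it can mark the
 * command cancelable so ring teardown is able to reach it. The my_drv
 * and my_dev names below are hypothetical.
 *
 *	static int my_drv_uring_cmd(struct io_uring_cmd *cmd,
 *				    unsigned int issue_flags)
 *	{
 *		struct my_dev *dev = cmd->file->private_data;
 *
 *		if (issue_flags & IO_URING_F_CANCEL) {
 *			// Invoked from io_uring_try_cancel_uring_cmd();
 *			// io_uring_cmd_done() also drops the cancelable state.
 *			my_dev_abort(dev, cmd);
 *			io_uring_cmd_done(cmd, -ECANCELED, 0, issue_flags);
 *			return 0;
 *		}
 *
 *		io_uring_cmd_mark_cancelable(cmd, issue_flags);
 *		my_dev_queue(dev, cmd);	// completes via io_uring_cmd_done()
 *		return -EIOCBQUEUED;
 *	}
 */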

static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned int flags = IO_URING_F_COMPLETE_DEFER;

	if (current->flags & (PF_EXITING | PF_KTHREAD))
		flags |= IO_URING_F_TASK_DEAD;

	/* task_work executor checks the deferred list completion */
	ioucmd->task_work_cb(ioucmd, flags);
}

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);
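
/*
 * Example (illustrative sketch, not part of this file): completions often
 * arrive in hard-IRQ context, where io_uring_cmd_done() must not be called
 * directly. The io_uring_cmd_complete_in_task() wrapper around
 * __io_uring_cmd_do_in_task() bounces completion to the submitter's task
 * context first. The my_drv names are hypothetical.
 *
 *	static void my_drv_cmd_tw_cb(struct io_uring_cmd *cmd,
 *				     unsigned int issue_flags)
 *	{
 *		// Runs via task_work in task context; safe to complete here.
 *		io_uring_cmd_done(cmd, my_drv_cmd_result(cmd), 0, issue_flags);
 *	}
 *
 *	static irqreturn_t my_drv_irq(int irq, void *data)
 *	{
 *		struct io_uring_cmd *cmd = my_drv_pop_completed(data);
 *
 *		if (cmd)
 *			io_uring_cmd_complete_in_task(cmd, my_drv_cmd_tw_cb);
 *		return IRQ_HANDLED;
 *	}
 */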

static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_uring_cmd_del_cancelable(ioucmd, issue_flags);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	io_req_uring_cleanup(req, issue_flags);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
		if (WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED))
			return;
		io_req_complete_defer(req);
	} else {
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
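
/*
 * Example (illustrative sketch, not part of this file): the basic contract.
 * A ->uring_cmd() handler that cannot finish inline returns -EIOCBQUEUED
 * and later posts the CQE with exactly one io_uring_cmd_done() call; a
 * handler that finishes inline just returns the result. The my_drv names
 * are hypothetical.
 *
 *	static int my_drv_uring_cmd(struct io_uring_cmd *cmd,
 *				    unsigned int issue_flags)
 *	{
 *		int ret = my_drv_try_inline(cmd);
 *
 *		if (ret != -EINPROGRESS)
 *			return ret;	// completed (or failed) inline
 *
 *		// Hardware owns the command now; its completion path must
 *		// later call io_uring_cmd_done() exactly once.
 *		return -EIOCBQUEUED;
 *	}
 */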

static int io_uring_cmd_prep_setup(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct uring_cache *cache;

	cache = io_uring_async_get(req);
	if (unlikely(!cache))
		return -ENOMEM;

	if (!(req->flags & REQ_F_FORCE_ASYNC)) {
		/* defer memcpy until we need it */
		ioucmd->sqe = sqe;
		return 0;
	}

	memcpy(req->async_data, sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = req->async_data;
	return 0;
}

int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		struct io_rsrc_node *node;
		u16 index = READ_ONCE(sqe->buf_index);

		node = io_rsrc_node_lookup(&ctx->buf_table, index);
		if (unlikely(!node))
			return -EFAULT;
		/*
		 * Pin the node upfront, prior to io_uring_cmd_import_fixed()
		 * being called. This prevents destruction of the mapped buffer
		 * we'll need at actual import time.
		 */
		io_req_assign_buf_node(req, node);
	}
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);

	return io_uring_cmd_prep_setup(req, sqe);
}
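
/*
 * Example (illustrative sketch, not part of this file): what the prep above
 * consumes. With raw liburing, userspace fills the SQE roughly like this;
 * MY_DRV_CMD_OP and the registered-buffer slot are hypothetical.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode = IORING_OP_URING_CMD;
 *	sqe->fd = dev_fd;			// the driver's device fd
 *	sqe->cmd_op = MY_DRV_CMD_OP;		// read back as ioucmd->cmd_op
 *	sqe->uring_cmd_flags = IORING_URING_CMD_FIXED;
 *	sqe->buf_index = 0;			// registered buffer slot
 *	// driver-private payload goes in sqe->cmd[]: 16 bytes, or 80 with
 *	// IORING_SETUP_SQE128
 */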

int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->compat)
		issue_flags |= IO_URING_F_COMPAT;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		struct uring_cache *cache = req->async_data;

		/*
		 * The retry will happen from io-wq, by which time the
		 * ring-mapped SQE may have been reused. Stash a copy in the
		 * async data and point the command at that stable copy.
		 */
		if (ioucmd->sqe != (void *) cache)
			memcpy(cache, ioucmd->sqe, uring_sqe_size(req->ctx));
		ioucmd->sqe = req->async_data;
		return -EAGAIN;
	} else if (ret == -EIOCBQUEUED) {
		return -EIOCBQUEUED;
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_uring_cleanup(req, issue_flags);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
	struct io_rsrc_node *node = req->buf_node;

	/* Must have had rsrc_node assigned at prep time */
	if (node)
		return io_import_fixed(rw, iter, node->buf, ubuf, len);

	return -EFAULT;
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
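
/*
 * Example (illustrative sketch, not part of this file): a driver resolves
 * the user address range against the buffer node pinned at prep time and
 * then operates on the resulting iov_iter. The my_drv names are
 * hypothetical.
 *
 *	static int my_drv_fixed_read(struct io_uring_cmd *cmd, u64 ubuf,
 *				     unsigned long len)
 *	{
 *		struct iov_iter iter;
 *		int ret;
 *
 *		ret = io_uring_cmd_import_fixed(ubuf, len, READ, &iter, cmd);
 *		if (ret)
 *			return ret;
 *		// iter now spans the pre-pinned pages; feed it to the usual
 *		// data path, e.g. copy_to_iter() or DMA mapping.
 *		return my_drv_fill(cmd, &iter);
 *	}
 */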

void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_req_queue_iowq(req);
}
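
/*
 * Possible use (an assumption, not taken from this file): a ->uring_cmd()
 * handler that has progressed past the point where returning -EAGAIN is
 * viable, yet still needs a context that may block, can punt itself to
 * io-wq:
 *
 *	if (issue_flags & IO_URING_F_NONBLOCK) {
 *		io_uring_cmd_issue_blocking(cmd);
 *		return -EIOCBQUEUED;	// re-issued later from io-wq
 *	}
 */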

static inline int io_uring_cmd_getsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optlen, optname, level, err;
	void __user *optval;

	level = READ_ONCE(cmd->sqe->level);
	if (level != SOL_SOCKET)
		return -EOPNOTSUPP;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);

	err = do_sock_getsockopt(sock, compat, level, optname,
				 USER_SOCKPTR(optval),
				 KERNEL_SOCKPTR(&optlen));
	if (err)
		return err;

	/* On success, return optlen */
	return optlen;
}

static inline int io_uring_cmd_setsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optname, optlen, level;
	void __user *optval;
	sockptr_t optval_s;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);
	level = READ_ONCE(cmd->sqe->level);
	optval_s = USER_SOCKPTR(optval);

	return do_sock_setsockopt(sock, compat, level, optname, optval_s,
				  optlen);
}

#if defined(CONFIG_NET)
int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct socket *sock = cmd->file->private_data;
	struct sock *sk = sock->sk;
	struct proto *prot = READ_ONCE(sk->sk_prot);
	int ret, arg = 0;

	if (!prot || !prot->ioctl)
		return -EOPNOTSUPP;

	switch (cmd->sqe->cmd_op) {
	case SOCKET_URING_OP_SIOCINQ:
		ret = prot->ioctl(sk, SIOCINQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_SIOCOUTQ:
		ret = prot->ioctl(sk, SIOCOUTQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_GETSOCKOPT:
		return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
	case SOCKET_URING_OP_SETSOCKOPT:
		return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
#endif
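
/*
 * Example (illustrative sketch, not part of this file): userspace can drive
 * the socket commands above through liburing, which provides
 * io_uring_prep_cmd_sock() (assumed available in liburing 2.5 or newer):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// bytes pending in the receive queue end up in cqe->res
 *	io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SIOCINQ, sockfd,
 *			       0, 0, NULL, 0);
 *	io_uring_submit(&ring);
 */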
377