xref: /linux/io_uring/uring_cmd.c (revision 91928e0d3cc29789f4483bffee5f36218f23942b)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring/cmd.h>
#include <linux/io_uring/net.h>
#include <linux/security.h>
#include <linux/nospec.h>
#include <net/sock.h>

#include <uapi/linux/io_uring.h>
#include <asm/ioctls.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "uring_cmd.h"

static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_uring_cmd_data *cache = req->async_data;

	if (cache->op_data) {
		kfree(cache->op_data);
		cache->op_data = NULL;
	}

	if (issue_flags & IO_URING_F_UNLOCKED)
		return;
	if (io_alloc_cache_put(&req->ctx->uring_cache, cache)) {
		ioucmd->sqe = NULL;
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
				   struct io_uring_task *tctx, bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool ret = false;

	lockdep_assert_held(&ctx->uring_lock);

	hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
			hash_node) {
		struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
				struct io_uring_cmd);
		struct file *file = req->file;

		if (!cancel_all && req->tctx != tctx)
			continue;

		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
						   IO_URING_F_COMPLETE_DEFER);
			ret = true;
		}
	}
	io_submit_flush_completions(ctx);
	return ret;
}

static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
	io_ring_submit_lock(ctx, issue_flags);
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}

/*
 * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
 * will try to cancel this issued command by sending ->uring_cmd() with
 * issue_flags of IO_URING_F_CANCEL.
 *
 * The command is guaranteed to not be done when calling ->uring_cmd()
 * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
 * with the race between io_uring cancelation and normal completion.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
		cmd->flags |= IORING_URING_CMD_CANCELABLE;
		io_ring_submit_lock(ctx, issue_flags);
		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
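
/*
 * Illustrative sketch (not in the original file, compiled out): one way a
 * driver's ->uring_cmd() handler might use the cancelation hooks above.
 * All ex_* names are hypothetical. The handler marks the command
 * cancelable once queued; when re-issued with IO_URING_F_CANCEL it tears
 * down its state and completes the command.
 */
#if 0	/* example only */
static int ex_drv_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	if (issue_flags & IO_URING_F_CANCEL) {
		/*
		 * ex_drv_abort() is assumed to resolve the race against a
		 * normal completion so the command completes exactly once.
		 */
		ex_drv_abort(cmd);
		io_uring_cmd_done(cmd, -ECANCELED, 0, issue_flags);
		return 0;
	}

	if (ex_drv_queue(cmd))
		return -EIO;
	io_uring_cmd_mark_cancelable(cmd, issue_flags);
	return -EIOCBQUEUED;
}
#endif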

static void io_uring_cmd_work(struct io_kiocb *req, io_tw_token_t tw)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned int flags = IO_URING_F_COMPLETE_DEFER;

	if (io_should_terminate_tw())
		flags |= IO_URING_F_TASK_DEAD;

	/* task_work executor checks the deferred list completion */
	ioucmd->task_work_cb(ioucmd, flags);
}

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);

static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_uring_cmd_del_cancelable(ioucmd, issue_flags);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	io_req_uring_cleanup(req, issue_flags);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
		if (WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED))
			return;
		io_req_complete_defer(req);
	} else {
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
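
/*
 * Illustrative sketch (not in the original file, compiled out): completing
 * a command that earlier returned -EIOCBQUEUED. io_uring_cmd_done() must
 * run in task context, so a driver that finishes work in interrupt context
 * bounces completion through __io_uring_cmd_do_in_task() (or the wrappers
 * in <linux/io_uring/cmd.h>) and forwards the issue_flags its callback
 * receives. The ex_* names are hypothetical; the status is stashed in the
 * small per-command pdu area.
 */
#if 0	/* example only */
struct ex_drv_req {
	int	status;
};

static void ex_drv_cmd_tw(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct ex_drv_req *er = io_uring_cmd_to_pdu(cmd, struct ex_drv_req);

	io_uring_cmd_done(cmd, er->status, 0, issue_flags);
}

/* called from the driver's completion interrupt */
static void ex_drv_irq_complete(struct io_uring_cmd *cmd, int status)
{
	struct ex_drv_req *er = io_uring_cmd_to_pdu(cmd, struct ex_drv_req);

	er->status = status;
	__io_uring_cmd_do_in_task(cmd, ex_drv_cmd_tw, 0);
}
#endif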

static int io_uring_cmd_prep_setup(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_uring_cmd_data *cache;

	cache = io_uring_alloc_async_data(&req->ctx->uring_cache, req);
	if (!cache)
		return -ENOMEM;
	cache->op_data = NULL;

	/*
	 * Unconditionally cache the SQE for now - this is only needed for
	 * requests that go async, but prep handlers must ensure that any
	 * sqe data is stable beyond prep. Since uring_cmd is special in
	 * that it doesn't read in per-op data, play it safe and copy the
	 * full SQE so it stays stable beyond prep. This can later get
	 * relaxed.
	 */
	memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = cache->sqes;
	return 0;
}
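
/*
 * Illustrative sketch (not in the original file, compiled out): because
 * prep copied the SQE above, a driver may safely parse its per-op payload
 * from ioucmd->sqe even after the ring SQE slot has been reused. The ex_*
 * names are hypothetical; io_uring_sqe_cmd() returns the SQE ->cmd area.
 */
#if 0	/* example only */
struct ex_drv_cmd {
	__u64	addr;
	__u32	len;
	__u32	flags;
};

static int ex_drv_parse_cmd(struct io_uring_cmd *ioucmd)
{
	const struct ex_drv_cmd *dc = io_uring_sqe_cmd(ioucmd->sqe);

	if (dc->flags)		/* no flags defined in this sketch */
		return -EINVAL;
	return ex_drv_start(ioucmd, dc->addr, dc->len);
}
#endif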

int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED)
		req->buf_index = READ_ONCE(sqe->buf_index);

	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);

	return io_uring_cmd_prep_setup(req, sqe);
}

int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (io_is_compat(ctx))
		issue_flags |= IO_URING_F_COMPAT;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN || ret == -EIOCBQUEUED)
		return ret;
	if (ret < 0)
		req_set_fail(req);
	io_req_uring_cleanup(req, issue_flags);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter,
			      struct io_uring_cmd *ioucmd,
			      unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	return io_import_reg_buf(req, iter, ubuf, len, rw, issue_flags);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
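
/*
 * Illustrative sketch (not in the original file, compiled out): importing
 * a registered buffer for a read-style command. The ex_* names are
 * hypothetical; userspace must have set IORING_URING_CMD_FIXED and a valid
 * buf_index so that prep recorded the buffer index.
 */
#if 0	/* example only */
static int ex_drv_read_fixed(struct io_uring_cmd *ioucmd, u64 ubuf, u32 len,
			     unsigned int issue_flags)
{
	struct iov_iter iter;
	int ret;

	/* ITER_DEST: data flows from the device into the buffer */
	ret = io_uring_cmd_import_fixed(ubuf, len, ITER_DEST, &iter, ioucmd,
					issue_flags);
	if (ret)
		return ret;

	return ex_drv_submit_read(ioucmd, &iter);
}
#endif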

void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_req_queue_iowq(req);
}

static inline int io_uring_cmd_getsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optlen, optname, level, err;
	void __user *optval;

	level = READ_ONCE(cmd->sqe->level);
	if (level != SOL_SOCKET)
		return -EOPNOTSUPP;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);

	err = do_sock_getsockopt(sock, compat, level, optname,
				 USER_SOCKPTR(optval),
				 KERNEL_SOCKPTR(&optlen));
	if (err)
		return err;

	/* On success, return optlen */
	return optlen;
}

static inline int io_uring_cmd_setsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optname, optlen, level;
	void __user *optval;
	sockptr_t optval_s;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);
	level = READ_ONCE(cmd->sqe->level);
	optval_s = USER_SOCKPTR(optval);

	return do_sock_setsockopt(sock, compat, level, optname, optval_s,
				  optlen);
}

#if defined(CONFIG_NET)
int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct socket *sock = cmd->file->private_data;
	struct sock *sk = sock->sk;
	struct proto *prot = READ_ONCE(sk->sk_prot);
	int ret, arg = 0;

	if (!prot || !prot->ioctl)
		return -EOPNOTSUPP;

	switch (cmd->cmd_op) {
	case SOCKET_URING_OP_SIOCINQ:
		ret = prot->ioctl(sk, SIOCINQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_SIOCOUTQ:
		ret = prot->ioctl(sk, SIOCOUTQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_GETSOCKOPT:
		return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
	case SOCKET_URING_OP_SETSOCKOPT:
		return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
#endif
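
/*
 * Illustrative sketch (not in the original file, compiled out): userspace
 * driving SOCKET_URING_OP_SIOCINQ via liburing. Assumes a liburing that
 * provides io_uring_prep_cmd_sock(); on completion, cqe->res carries the
 * queued byte count or a negative errno.
 */
#if 0	/* userspace example */
#include <liburing.h>

static int ex_sock_inq(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SIOCINQ, sockfd, 0, 0,
			       NULL, 0);
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret)
		return ret;
	ret = cqe->res;		/* bytes ready to read, or -errno */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}
#endif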