// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring/cmd.h>
#include <linux/io_uring/net.h>
#include <linux/security.h>
#include <linux/nospec.h>
#include <net/sock.h>

#include <uapi/linux/io_uring.h>
#include <asm/ioctls.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "uring_cmd.h"

static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_uring_cmd_data *cache = req->async_data;

	if (cache->op_data) {
		kfree(cache->op_data);
		cache->op_data = NULL;
	}

	if (issue_flags & IO_URING_F_UNLOCKED)
		return;
	if (io_alloc_cache_put(&req->ctx->uring_cache, cache)) {
		ioucmd->sqe = NULL;
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
				   struct io_uring_task *tctx, bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool ret = false;

	lockdep_assert_held(&ctx->uring_lock);

	hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
				  hash_node) {
		struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
							   struct io_uring_cmd);
		struct file *file = req->file;

		if (!cancel_all && req->tctx != tctx)
			continue;

		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
						   IO_URING_F_COMPLETE_DEFER);
			ret = true;
		}
	}
	io_submit_flush_completions(ctx);
	return ret;
}

static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
					unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
	io_ring_submit_lock(ctx, issue_flags);
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}

/*
 * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
 * will try to cancel this issued command by sending ->uring_cmd() with
 * issue_flags of IO_URING_F_CANCEL.
 *
 * The command is guaranteed to not be done when calling ->uring_cmd()
 * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
 * with the race between io_uring cancelation and normal completion.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
				  unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
		cmd->flags |= IORING_URING_CMD_CANCELABLE;
		io_ring_submit_lock(ctx, issue_flags);
		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);
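
/*
 * A minimal sketch of driver-side usage; "foo_dev", "foo_abort_cmd" and
 * "foo_queue_cmd" are hypothetical names, not part of this file:
 *
 *	static int foo_uring_cmd(struct io_uring_cmd *cmd,
 *				 unsigned int issue_flags)
 *	{
 *		struct foo_dev *foo = cmd->file->private_data;
 *
 *		if (issue_flags & IO_URING_F_CANCEL) {
 *			// Cancel path: abort the in-flight command. A normal
 *			// completion may race with this; the driver must
 *			// ensure io_uring_cmd_done() runs exactly once.
 *			foo_abort_cmd(foo, cmd);
 *			return 0;
 *		}
 *
 *		foo_queue_cmd(foo, io_uring_sqe_cmd(cmd->sqe));
 *		io_uring_cmd_mark_cancelable(cmd, issue_flags);
 *		return -EIOCBQUEUED;
 *	}
 */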

static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned int flags = IO_URING_F_COMPLETE_DEFER;

	if (io_should_terminate_tw())
		flags |= IO_URING_F_TASK_DEAD;

	/* task_work executor checks the deferred list completion */
	ioucmd->task_work_cb(ioucmd, flags);
}

void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			       void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			       unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);
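
/*
 * Drivers typically reach this through the io_uring_cmd_complete_in_task()
 * or io_uring_cmd_do_in_task_lazy() wrappers. A sketch of punting a
 * completion from IRQ context to task context ("foo_irq_handler" and
 * "foo_cmd_from_irq" are hypothetical):
 *
 *	static void foo_cmd_tw_cb(struct io_uring_cmd *cmd,
 *				  unsigned int issue_flags)
 *	{
 *		io_uring_cmd_done(cmd, 0, 0, issue_flags);
 *	}
 *
 *	static irqreturn_t foo_irq_handler(int irq, void *data)
 *	{
 *		struct io_uring_cmd *cmd = foo_cmd_from_irq(data);
 *
 *		// Defer to task context; the callback then runs via
 *		// req->io_task_work with the flags set in io_uring_cmd_work().
 *		io_uring_cmd_complete_in_task(cmd, foo_cmd_tw_cb);
 *		return IRQ_HANDLED;
 *	}
 */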

static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, u64 res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_uring_cmd_del_cancelable(ioucmd, issue_flags);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	io_req_uring_cleanup(req, issue_flags);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
		if (WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED))
			return;
		io_req_complete_defer(req);
	} else {
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);
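
/*
 * A sketch of the completion side (hypothetical "foo_cmd" per-command data,
 * stashed in the uring_cmd PDU area): the driver completes exactly once,
 * forwarding the issue_flags it was handed. res2 is only surfaced on CQE32
 * rings; pass 0 otherwise.
 *
 *	static void foo_cmd_finish(struct io_uring_cmd *cmd,
 *				   unsigned int issue_flags)
 *	{
 *		struct foo_cmd *fcmd = io_uring_cmd_to_pdu(cmd, struct foo_cmd);
 *
 *		io_uring_cmd_done(cmd, fcmd->status, fcmd->result, issue_flags);
 *	}
 */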

static int io_uring_cmd_prep_setup(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_uring_cmd_data *cache;

	cache = io_uring_alloc_async_data(&req->ctx->uring_cache, req);
	if (!cache)
		return -ENOMEM;
	cache->op_data = NULL;

	/*
	 * Unconditionally cache the SQE for now - this is only needed for
	 * requests that go async, but prep handlers must ensure that any
	 * SQE data is stable beyond prep. Since uring_cmd is special in
	 * that it doesn't read in per-op data, play it safe and copy the
	 * full SQE here. This can later get relaxed.
	 */
	memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = cache->sqes;
	return 0;
}

int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		struct io_rsrc_node *node;
		u16 index = READ_ONCE(sqe->buf_index);

		node = io_rsrc_node_lookup(&ctx->buf_table, index);
		if (unlikely(!node))
			return -EFAULT;
		/*
		 * Pin the node upfront, prior to io_uring_cmd_import_fixed()
		 * being called. This prevents destruction of the mapped buffer
		 * we'll need at actual import time.
		 */
		io_req_assign_buf_node(req, node);
	}
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);

	return io_uring_cmd_prep_setup(req, sqe);
}

int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->compat)
		issue_flags |= IO_URING_F_COMPAT;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN || ret == -EIOCBQUEUED)
		return ret;
	if (ret < 0)
		req_set_fail(req);
	io_req_uring_cleanup(req, issue_flags);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
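
/*
 * A sketch of how a driver exposes a ->uring_cmd() handler (and, for
 * IORING_SETUP_IOPOLL rings, a ->uring_cmd_iopoll() handler); "foo_" names
 * are hypothetical:
 *
 *	static const struct file_operations foo_fops = {
 *		.owner		  = THIS_MODULE,
 *		.uring_cmd	  = foo_uring_cmd,
 *		.uring_cmd_iopoll = foo_uring_cmd_iopoll,
 *	};
 */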

int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
	struct io_rsrc_node *node = req->buf_node;

	/* Must have had rsrc_node assigned at prep time */
	if (node)
		return io_import_fixed(rw, iter, node->buf, ubuf, len);

	return -EFAULT;
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);
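
/*
 * A sketch of driver-side import (ubuf_addr/ubuf_len are hypothetical): this
 * only works if the SQE was prepped with IORING_URING_CMD_FIXED, so that prep
 * pinned a buf_node above. ITER_DEST assumes the device writes into the
 * registered buffer; use ITER_SOURCE for the other direction.
 *
 *	struct iov_iter iter;
 *	int ret;
 *
 *	ret = io_uring_cmd_import_fixed(ubuf_addr, ubuf_len, ITER_DEST,
 *					&iter, ioucmd);
 *	if (ret)
 *		return ret;
 *	// iter now spans the registered buffer and can be fed to the
 *	// normal read/write plumbing.
 */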

void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_req_queue_iowq(req);
}

#if defined(CONFIG_NET)
static inline int io_uring_cmd_getsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optlen, optname, level, err;
	void __user *optval;

	level = READ_ONCE(cmd->sqe->level);
	if (level != SOL_SOCKET)
		return -EOPNOTSUPP;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);

	err = do_sock_getsockopt(sock, compat, level, optname,
				 USER_SOCKPTR(optval),
				 KERNEL_SOCKPTR(&optlen));
	if (err)
		return err;

	/* On success, return optlen */
	return optlen;
}

static inline int io_uring_cmd_setsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optname, optlen, level;
	void __user *optval;
	sockptr_t optval_s;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);
	level = READ_ONCE(cmd->sqe->level);
	optval_s = USER_SOCKPTR(optval);

	return do_sock_setsockopt(sock, compat, level, optname, optval_s,
				  optlen);
}
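
/*
 * Userspace reaches io_uring_cmd_sock() by issuing IORING_OP_URING_CMD on a
 * socket fd. A sketch using liburing, assuming its io_uring_prep_cmd_sock()
 * helper; the byte count is returned in cqe->res on success:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	// Query the amount of unread data on the socket (SIOCINQ).
 *	io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SIOCINQ, sockfd,
 *			       0, 0, NULL, 0);
 *	io_uring_submit(&ring);
 */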

int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct socket *sock = cmd->file->private_data;
	struct sock *sk = sock->sk;
	struct proto *prot = READ_ONCE(sk->sk_prot);
	int ret, arg = 0;

	if (!prot || !prot->ioctl)
		return -EOPNOTSUPP;

	switch (cmd->cmd_op) {
	case SOCKET_URING_OP_SIOCINQ:
		ret = prot->ioctl(sk, SIOCINQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_SIOCOUTQ:
		ret = prot->ioctl(sk, SIOCOUTQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_GETSOCKOPT:
		return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
	case SOCKET_URING_OP_SETSOCKOPT:
		return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
#endif