// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring/cmd.h>
#include <linux/io_uring/net.h>
#include <linux/security.h>
#include <linux/nospec.h>
#include <net/sock.h>

#include <uapi/linux/io_uring.h>
#include <asm/ioctls.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "rsrc.h"
#include "uring_cmd.h"

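/*
 * Get async_data for this request: reuse an entry from the per-ctx cache
 * if one is available, otherwise fall back to a fresh allocation.
 */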
static struct uring_cache *io_uring_async_get(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct uring_cache *cache;

	cache = io_alloc_cache_get(&ctx->uring_cache);
	if (cache) {
		req->flags |= REQ_F_ASYNC_DATA;
		req->async_data = cache;
		return cache;
	}
	if (!io_alloc_async_data(req))
		return req->async_data;
	return NULL;
}

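/*
 * If the ring is locked, try to return the async data to the per-ctx
 * cache; on success, clear the request's async-data state so the buffer
 * isn't freed twice.
 */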
static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct uring_cache *cache = req->async_data;

	if (issue_flags & IO_URING_F_UNLOCKED)
		return;
	if (io_alloc_cache_put(&req->ctx->uring_cache, cache)) {
		ioucmd->sqe = NULL;
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

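/*
 * Walk the ctx's cancelable uring_cmd list and ask each matching command's
 * ->uring_cmd() handler to cancel via IO_URING_F_CANCEL. Returns true if
 * at least one command was asked to cancel.
 */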
bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
				   struct task_struct *task, bool cancel_all)
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool ret = false;

	lockdep_assert_held(&ctx->uring_lock);

	hlist_for_each_entry_safe(req, tmp, &ctx->cancelable_uring_cmd,
			hash_node) {
		struct io_uring_cmd *cmd = io_kiocb_to_cmd(req,
				struct io_uring_cmd);
		struct file *file = req->file;

		if (!cancel_all && req->task != task)
			continue;

		if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
			/* ->sqe isn't available if no async data */
			if (!req_has_async_data(req))
				cmd->sqe = NULL;
			file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
						   IO_URING_F_COMPLETE_DEFER);
			ret = true;
		}
	}
	io_submit_flush_completions(ctx);
	return ret;
}

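/*
 * Undo io_uring_cmd_mark_cancelable(): clear the flag and unlink the
 * request from the ctx's cancelable list, taking the submit lock if the
 * issue path doesn't already hold it.
 */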
static void io_uring_cmd_del_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE))
		return;

	cmd->flags &= ~IORING_URING_CMD_CANCELABLE;
	io_ring_submit_lock(ctx, issue_flags);
	hlist_del(&req->hash_node);
	io_ring_submit_unlock(ctx, issue_flags);
}

/*
 * Mark this command as cancelable, then io_uring_try_cancel_uring_cmd()
 * will try to cancel this issued command by calling ->uring_cmd() with
 * IO_URING_F_CANCEL in issue_flags.
 *
 * The command is guaranteed to not be done when ->uring_cmd() is called
 * with IO_URING_F_CANCEL, but it is the driver's responsibility to deal
 * with the race between io_uring cancellation and normal completion.
 */
void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
		unsigned int issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(cmd);
	struct io_ring_ctx *ctx = req->ctx;

	if (!(cmd->flags & IORING_URING_CMD_CANCELABLE)) {
		cmd->flags |= IORING_URING_CMD_CANCELABLE;
		io_ring_submit_lock(ctx, issue_flags);
		hlist_add_head(&req->hash_node, &ctx->cancelable_uring_cmd);
		io_ring_submit_unlock(ctx, issue_flags);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_mark_cancelable);

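/*
 * A minimal driver-side sketch (not part of this file; "foo" names are
 * hypothetical): a ->uring_cmd() handler would typically mark the command
 * cancelable before queueing it to hardware, return -EIOCBQUEUED, and
 * later complete it via io_uring_cmd_done():
 *
 *	static int foo_uring_cmd(struct io_uring_cmd *cmd,
 *				 unsigned int issue_flags)
 *	{
 *		io_uring_cmd_mark_cancelable(cmd, issue_flags);
 *		foo_queue_hw_request(cmd);	// completes with io_uring_cmd_done()
 *		return -EIOCBQUEUED;
 *	}
 */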
static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	/* task_work executor checks the deferred list completion */
	ioucmd->task_work_cb(ioucmd, IO_URING_F_COMPLETE_DEFER);
}

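/*
 * Arrange for @task_work_cb to be invoked for this command from the
 * task_work context of the submitting task.
 */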
void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned),
			unsigned flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	__io_req_task_work_add(req, flags);
}
EXPORT_SYMBOL_GPL(__io_uring_cmd_do_in_task);

static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->big_cqe.extra1 = extra1;
	req->big_cqe.extra2 = extra2;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_uring_cmd_del_cancelable(ioucmd, issue_flags);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	io_req_uring_cleanup(req, issue_flags);
	if (req->ctx->flags & IORING_SETUP_IOPOLL) {
		/* order with io_iopoll_req_issued() checking ->iopoll_completed */
		smp_store_release(&req->iopoll_completed, 1);
	} else if (issue_flags & IO_URING_F_COMPLETE_DEFER) {
		if (WARN_ON_ONCE(issue_flags & IO_URING_F_UNLOCKED))
			return;
		io_req_complete_defer(req);
	} else {
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);

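/*
 * Allocate async data up front. For inline (non-forced-async) issue, the
 * SQE copy is deferred until it's actually needed; a forced-async request
 * must copy the SQE immediately, since the original submission queue entry
 * may be recycled before the request runs.
 */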
static int io_uring_cmd_prep_setup(struct io_kiocb *req,
				   const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct uring_cache *cache;

	cache = io_uring_async_get(req);
	if (unlikely(!cache))
		return -ENOMEM;

	if (!(req->flags & REQ_F_FORCE_ASYNC)) {
		/* defer memcpy until we need it */
		ioucmd->sqe = sqe;
		return 0;
	}

	memcpy(req->async_data, sqe, uring_sqe_size(req->ctx));
	ioucmd->sqe = req->async_data;
	return 0;
}

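/*
 * Validate the SQE and pull out the fields io_uring itself consumes: the
 * uring_cmd flags, an optional fixed buffer index, and cmd_op.
 */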
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_MASK)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		req->buf_index = READ_ONCE(sqe->buf_index);
		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);

	return io_uring_cmd_prep_setup(req, sqe);
}

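/*
 * Issue path: translate ring setup flags into issue_flags, run the
 * security check, and dispatch to the file's ->uring_cmd() handler.
 * An -EAGAIN return stashes a private SQE copy so the request can be
 * retried after the original SQE has been recycled.
 */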
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->compat)
		issue_flags |= IO_URING_F_COMPAT;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
	}

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		struct uring_cache *cache = req->async_data;

		if (ioucmd->sqe != (void *) cache)
			memcpy(cache, ioucmd->sqe, uring_sqe_size(req->ctx));
		return -EAGAIN;
	} else if (ret == -EIOCBQUEUED) {
		return -EIOCBQUEUED;
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_uring_cleanup(req, issue_flags);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

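/*
 * Import a registered (fixed) buffer into @iter for a uring_cmd, using the
 * mapping that was resolved at prep time via IORING_URING_CMD_FIXED.
 */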
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	return io_import_fixed(rw, iter, req->imu, ubuf, len);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);

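/* Punt the command to an io-wq worker thread, where it may block. */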
void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	io_req_queue_iowq(req);
}

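/*
 * SOCKET_URING_OP_GETSOCKOPT: only SOL_SOCKET is supported; on success,
 * the updated optlen is returned as the result.
 */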
static inline int io_uring_cmd_getsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optlen, optname, level, err;
	void __user *optval;

	level = READ_ONCE(cmd->sqe->level);
	if (level != SOL_SOCKET)
		return -EOPNOTSUPP;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);

	err = do_sock_getsockopt(sock, compat, level, optname,
				 USER_SOCKPTR(optval),
				 KERNEL_SOCKPTR(&optlen));
	if (err)
		return err;

	/* On success, return optlen */
	return optlen;
}

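/* SOCKET_URING_OP_SETSOCKOPT: pass the option straight through to the socket layer. */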
static inline int io_uring_cmd_setsockopt(struct socket *sock,
					  struct io_uring_cmd *cmd,
					  unsigned int issue_flags)
{
	bool compat = !!(issue_flags & IO_URING_F_COMPAT);
	int optname, optlen, level;
	void __user *optval;
	sockptr_t optval_s;

	optval = u64_to_user_ptr(READ_ONCE(cmd->sqe->optval));
	optname = READ_ONCE(cmd->sqe->optname);
	optlen = READ_ONCE(cmd->sqe->optlen);
	level = READ_ONCE(cmd->sqe->level);
	optval_s = USER_SOCKPTR(optval);

	return do_sock_setsockopt(sock, compat, level, optname, optval_s,
				  optlen);
}

#if defined(CONFIG_NET)
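/*
 * Generic uring_cmd handler for sockets: dispatches SOCKET_URING_OP_*
 * commands (SIOCINQ/SIOCOUTQ queries and get/setsockopt).
 */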
int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
{
	struct socket *sock = cmd->file->private_data;
	struct sock *sk = sock->sk;
	struct proto *prot = READ_ONCE(sk->sk_prot);
	int ret, arg = 0;

	if (!prot || !prot->ioctl)
		return -EOPNOTSUPP;

	switch (cmd->sqe->cmd_op) {
	case SOCKET_URING_OP_SIOCINQ:
		ret = prot->ioctl(sk, SIOCINQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_SIOCOUTQ:
		ret = prot->ioctl(sk, SIOCOUTQ, &arg);
		if (ret)
			return ret;
		return arg;
	case SOCKET_URING_OP_GETSOCKOPT:
		return io_uring_cmd_getsockopt(sock, cmd, issue_flags);
	case SOCKET_URING_OP_SETSOCKOPT:
		return io_uring_cmd_setsockopt(sock, cmd, issue_flags);
	default:
		return -EOPNOTSUPP;
	}
}
EXPORT_SYMBOL_GPL(io_uring_cmd_sock);
#endif