xref: /linux/io_uring/msg_ring.c (revision ec16a3cdf37e507013062f9c4a2067eacdd12b62)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "filetable.h"
#include "alloc_cache.h"
#include "msg_ring.h"

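/*
 * IORING_OP_MSG_RING lets one io_uring instance message another:
 * IORING_MSG_DATA posts an arbitrary CQE into the target ring, and
 * IORING_MSG_SEND_FD installs one of the sender's fixed files into the
 * target ring's fixed file table.
 */
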
/* All valid masks for MSG_RING */
#define IORING_MSG_RING_MASK		(IORING_MSG_RING_CQE_SKIP | \
					IORING_MSG_RING_FLAGS_PASS)

struct io_msg {
	struct file			*file;
	struct file			*src_file;
	struct callback_head		tw;
	u64 user_data;
	u32 len;
	u32 cmd;
	u32 src_fd;
	union {
		u32 dst_fd;
		u32 cqe_flags;
	};
	u32 flags;
};

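/* Drop the target ring's uring_lock taken via io_double_lock_ctx() */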
static void io_double_unlock_ctx(struct io_ring_ctx *octx)
{
	mutex_unlock(&octx->uring_lock);
}

static int io_double_lock_ctx(struct io_ring_ctx *octx,
			      unsigned int issue_flags)
{
	/*
	 * To ensure proper ordering between the two ctxs, we can only
	 * attempt a trylock on the target. If that fails and we already have
	 * the source ctx lock, punt to io-wq.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		if (!mutex_trylock(&octx->uring_lock))
			return -EAGAIN;
		return 0;
	}
	mutex_lock(&octx->uring_lock);
	return 0;
}

void io_msg_ring_cleanup(struct io_kiocb *req)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (WARN_ON_ONCE(!msg->src_file))
		return;

	fput(msg->src_file);
	msg->src_file = NULL;
}

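/*
 * The target ring only allows its submitter task to post completions
 * (ctx->task_complete, e.g. with IORING_SETUP_DEFER_TASKRUN), so the
 * message must be delivered via task_work rather than posted directly.
 */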
static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx)
{
	return target_ctx->task_complete;
}

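/*
 * Runs as task_work on the target ring's task: post the CQE carried in
 * the request, then try to recycle the carrier request into the target's
 * msg_cache (freeing it otherwise) and drop the ctx reference taken in
 * io_msg_remote_post().
 */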
static void io_msg_tw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
	if (spin_trylock(&ctx->msg_lock)) {
		if (io_alloc_cache_put(&ctx->msg_cache, req))
			req = NULL;
		spin_unlock(&ctx->msg_lock);
	}
	if (req)
		kmem_cache_free(req_cachep, req);
	percpu_ref_put(&ctx->refs);
}

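/*
 * Fill a carrier request with the CQE payload and queue it as task_work
 * on the target ring. A reference on the target ctx is held until
 * io_msg_tw_complete() runs. Fails with -EOWNERDEAD if the target's
 * submitter task no longer has an io_uring context.
 */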
static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
			      int res, u32 cflags, u64 user_data)
{
	req->tctx = READ_ONCE(ctx->submitter_task->io_uring);
	if (!req->tctx) {
		kmem_cache_free(req_cachep, req);
		return -EOWNERDEAD;
	}
	req->cqe.user_data = user_data;
	io_req_set_res(req, res, cflags);
	percpu_ref_get(&ctx->refs);
	req->ctx = ctx;
	req->io_task_work.func = io_msg_tw_complete;
	io_req_task_work_add_remote(req, ctx, IOU_F_TWQ_LAZY_WAKE);
	return 0;
}

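/*
 * Get a carrier request for posting a remote CQE: prefer one recycled
 * from the target's msg_cache (trylock only, never block on the cache
 * lock), otherwise fall back to a fresh zeroed allocation.
 */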
static struct io_kiocb *io_msg_get_kiocb(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req = NULL;

	if (spin_trylock(&ctx->msg_lock)) {
		req = io_alloc_cache_get(&ctx->msg_cache);
		spin_unlock(&ctx->msg_lock);
		if (req)
			return req;
	}
	return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
}

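/*
 * Remote IORING_MSG_DATA path: allocate a carrier request and hand the
 * user_data/len (and optional passed cqe_flags) to io_msg_remote_post().
 */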
static int io_msg_data_remote(struct io_ring_ctx *target_ctx,
			      struct io_msg *msg)
{
	struct io_kiocb *target;
	u32 flags = 0;

	target = io_msg_get_kiocb(target_ctx);
	if (unlikely(!target))
		return -ENOMEM;

	if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
		flags = msg->cqe_flags;

	return io_msg_remote_post(target_ctx, target, msg->len, flags,
					msg->user_data);
}

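/*
 * Post an IORING_MSG_DATA CQE to the target ring, either via the remote
 * task_work path above or directly with io_post_aux_cqe(). For IOPOLL
 * target rings the target uring_lock must be held around the direct post.
 * Returns -EOVERFLOW if the CQE could not be posted.
 */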
static int __io_msg_ring_data(struct io_ring_ctx *target_ctx,
			      struct io_msg *msg, unsigned int issue_flags)
{
	u32 flags = 0;
	int ret;

	if (msg->src_fd || msg->flags & ~IORING_MSG_RING_FLAGS_PASS)
		return -EINVAL;
	if (!(msg->flags & IORING_MSG_RING_FLAGS_PASS) && msg->dst_fd)
		return -EINVAL;
	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
		return -EBADFD;

	if (io_msg_need_remote(target_ctx))
		return io_msg_data_remote(target_ctx, msg);

	if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
		flags = msg->cqe_flags;

	ret = -EOVERFLOW;
	if (target_ctx->flags & IORING_SETUP_IOPOLL) {
		if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
			return -EAGAIN;
	}
	if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, flags))
		ret = 0;
	if (target_ctx->flags & IORING_SETUP_IOPOLL)
		io_double_unlock_ctx(target_ctx);
	return ret;
}

static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	return __io_msg_ring_data(target_ctx, msg, issue_flags);
}

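/*
 * Look up the fixed file at src_fd in the sending ring's file table and
 * take a reference on it, under the submit lock. REQ_F_NEED_CLEANUP makes
 * sure io_msg_ring_cleanup() drops that reference if the request never
 * gets to install the file.
 */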
static int io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_rsrc_node *node;
	int ret = -EBADF;

	io_ring_submit_lock(ctx, issue_flags);
	node = io_rsrc_node_lookup(&ctx->file_table.data, msg->src_fd);
	if (node) {
		msg->src_file = io_slot_file(node);
		if (msg->src_file)
			get_file(msg->src_file);
		req->flags |= REQ_F_NEED_CLEANUP;
		ret = 0;
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

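/*
 * With the target ring locked, install the grabbed file into the target's
 * fixed file table at dst_fd and, unless IORING_MSG_RING_CQE_SKIP was set,
 * post a CQE on the target ring carrying the slot the file landed in.
 */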
static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct file *src_file = msg->src_file;
	int ret;

	if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
		return -EAGAIN;

	ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
	if (ret < 0)
		goto out_unlock;

	msg->src_file = NULL;
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (msg->flags & IORING_MSG_RING_CQE_SKIP)
		goto out_unlock;
	/*
	 * If this fails, the target still received the file descriptor but
	 * wasn't notified of the fact. This means that if this request
	 * completes with -EOVERFLOW, then the sender must ensure that a
	 * later IORING_OP_MSG_RING delivers the message.
	 */
	if (!io_post_aux_cqe(target_ctx, msg->user_data, ret, 0))
		ret = -EOVERFLOW;
out_unlock:
	io_double_unlock_ctx(target_ctx);
	return ret;
}

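/*
 * task_work callback on the target's submitter task for the remote
 * IORING_MSG_SEND_FD path: perform the install unless the task is
 * exiting, then complete the request.
 */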
static void io_msg_tw_fd_complete(struct callback_head *head)
{
	struct io_msg *msg = container_of(head, struct io_msg, tw);
	struct io_kiocb *req = cmd_to_io_kiocb(msg);
	int ret = -EOWNERDEAD;

	if (!(current->flags & PF_EXITING))
		ret = io_msg_install_complete(req, IO_URING_F_UNLOCKED);
	if (ret < 0)
		req_set_fail(req);
	io_req_queue_tw_complete(req, ret);
}

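/*
 * Punt the file install to the target ring's submitter task via task_work.
 * The request completes later from io_msg_tw_fd_complete(), hence
 * IOU_ISSUE_SKIP_COMPLETE on success.
 */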
static int io_msg_fd_remote(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct task_struct *task = READ_ONCE(ctx->submitter_task);

	if (unlikely(!task))
		return -EOWNERDEAD;

	init_task_work(&msg->tw, io_msg_tw_fd_complete);
	if (task_work_add(task, &msg->tw, TWA_SIGNAL))
		return -EOWNERDEAD;

	return IOU_ISSUE_SKIP_COMPLETE;
}

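/*
 * IORING_MSG_SEND_FD issue path: validate, grab the source fixed file
 * (only once, in case of -EAGAIN retries), then install it into the
 * target either directly or via the target's submitter task.
 */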
static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;

	if (msg->len)
		return -EINVAL;
	if (target_ctx == ctx)
		return -EINVAL;
	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
		return -EBADFD;
	if (!msg->src_file) {
		int ret = io_msg_grab_file(req, issue_flags);
		if (unlikely(ret))
			return ret;
	}

	if (io_msg_need_remote(target_ctx))
		return io_msg_fd_remote(req);
	return io_msg_install_complete(req, issue_flags);
}

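/*
 * Decode the MSG_RING SQE: ->off carries the target CQE user_data, ->len
 * the CQE res, ->addr the sub-command, ->addr3 the source fixed file index
 * and ->file_index the destination slot (or, with
 * IORING_MSG_RING_FLAGS_PASS, the cqe_flags to pass through).
 */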
static int __io_msg_ring_prep(struct io_msg *msg, const struct io_uring_sqe *sqe)
{
	if (unlikely(sqe->buf_index || sqe->personality))
		return -EINVAL;

	msg->src_file = NULL;
	msg->user_data = READ_ONCE(sqe->off);
	msg->len = READ_ONCE(sqe->len);
	msg->cmd = READ_ONCE(sqe->addr);
	msg->src_fd = READ_ONCE(sqe->addr3);
	msg->dst_fd = READ_ONCE(sqe->file_index);
	msg->flags = READ_ONCE(sqe->msg_ring_flags);
	if (msg->flags & ~IORING_MSG_RING_MASK)
		return -EINVAL;

	return 0;
}

int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_msg_ring_prep(io_kiocb_to_cmd(req, struct io_msg), sqe);
}

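/*
 * Issue side of IORING_OP_MSG_RING: the target fd must itself be an
 * io_uring instance. -EAGAIN and IOU_ISSUE_SKIP_COMPLETE are passed
 * straight back so the core can retry or await the async completion.
 */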
int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	int ret;

	ret = -EBADFD;
	if (!io_is_uring_fops(req->file))
		goto done;

	switch (msg->cmd) {
	case IORING_MSG_DATA:
		ret = io_msg_ring_data(req, issue_flags);
		break;
	case IORING_MSG_SEND_FD:
		ret = io_msg_send_fd(req, issue_flags);
		break;
	default:
		ret = -EINVAL;
		break;
	}

done:
	if (ret < 0) {
		if (ret == -EAGAIN || ret == IOU_ISSUE_SKIP_COMPLETE)
			return ret;
		req_set_fail(req);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

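/*
 * Synchronous MSG_RING variant used outside request submission: posts a
 * data CQE to the ring referred to by sqe->fd. Only IORING_MSG_DATA is
 * supported, since there is no source ring to pass files from.
 */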
int io_uring_sync_msg_ring(struct io_uring_sqe *sqe)
{
	struct io_msg io_msg = { };
	int ret;

	ret = __io_msg_ring_prep(&io_msg, sqe);
	if (unlikely(ret))
		return ret;

	/*
	 * Only data sending supported, not IORING_MSG_SEND_FD as that one
	 * doesn't make sense without a source ring to send files from.
	 */
	if (io_msg.cmd != IORING_MSG_DATA)
		return -EINVAL;

	CLASS(fd, f)(sqe->fd);
	if (fd_empty(f))
		return -EBADF;
	if (!io_is_uring_fops(fd_file(f)))
		return -EBADFD;
	return __io_msg_ring_data(fd_file(f)->private_data,
				  &io_msg, IO_URING_F_UNLOCKED);
}

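/*
 * Free callback for carrier requests parked in a ring's msg_cache.
 */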
void io_msg_cache_free(const void *entry)
{
	struct io_kiocb *req = (struct io_kiocb *) entry;

	kmem_cache_free(req_cachep, req);
}