xref: /linux/io_uring/msg_ring.c (revision 811f35ff59b6f99ae272d6f5b96bc9e974f88196)
// SPDX-License-Identifier: GPL-2.0
/*
 * IORING_OP_MSG_RING: send a message from one io_uring to another. The
 * message is either a 64-bit value plus a 32-bit payload posted as a CQE
 * on the target ring (IORING_MSG_DATA), or a fixed file descriptor
 * installed into the target ring's fixed file table (IORING_MSG_SEND_FD).
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "filetable.h"
#include "msg_ring.h"
struct io_msg {
	/* first entry by io_uring convention; aliases req->file (the target ring) */
	struct file			*file;
	struct file			*src_file;
	struct callback_head		tw;	/* task_work node for remote posting */
	u64				user_data;	/* becomes cqe->user_data on the target */
	u32				len;		/* becomes cqe->res on the target */
	u32				cmd;		/* IORING_MSG_DATA or IORING_MSG_SEND_FD */
	u32				src_fd;		/* fixed file index in the source ring */
	u32				dst_fd;		/* fixed file slot in the target ring */
	u32				flags;		/* IORING_MSG_RING_* flags */
};

/* Drop the target ctx lock taken in io_double_lock_ctx() */
static void io_double_unlock_ctx(struct io_ring_ctx *octx)
{
	mutex_unlock(&octx->uring_lock);
}

static int io_double_lock_ctx(struct io_ring_ctx *octx,
			      unsigned int issue_flags)
{
	/*
	 * To ensure proper ordering between the two ctxs, we can only
	 * attempt a trylock on the target. If that fails and we already have
	 * the source ctx lock, punt to io-wq.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		if (!mutex_trylock(&octx->uring_lock))
			return -EAGAIN;
		return 0;
	}
	mutex_lock(&octx->uring_lock);
	return 0;
}
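
/*
 * Illustration (not compiled code): the trylock above avoids an ABBA
 * deadlock when two rings message each other concurrently. Unconditional
 * locking could produce:
 *
 *	task A: lock(ring A) -> lock(ring B)	blocks, B held by task B
 *	task B: lock(ring B) -> lock(ring A)	blocks, A held by task A
 *
 * Failing the trylock with -EAGAIN instead punts the request to io-wq,
 * where it runs with IO_URING_F_UNLOCKED set and can safely block on the
 * target lock because the source lock is not held there.
 */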

void io_msg_ring_cleanup(struct io_kiocb *req)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (WARN_ON_ONCE(!msg->src_file))
		return;

	fput(msg->src_file);
	msg->src_file = NULL;
}

/*
 * Rings with ->task_complete set (e.g. IORING_SETUP_DEFER_TASKRUN) may only
 * have completions posted by their submitter task, so any other task must
 * punt the posting there via task_work.
 */
static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx)
{
	if (!target_ctx->task_complete)
		return false;
	return current != target_ctx->submitter_task;
}

static int io_msg_exec_remote(struct io_kiocb *req, task_work_func_t func)
{
	struct io_ring_ctx *ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct task_struct *task = READ_ONCE(ctx->submitter_task);

	if (unlikely(!task))
		return -EOWNERDEAD;

	init_task_work(&msg->tw, func);
	/* use the task snapshotted above rather than re-reading ->submitter_task */
	if (task_work_add(task, &msg->tw, TWA_SIGNAL))
		return -EOWNERDEAD;

	return IOU_ISSUE_SKIP_COMPLETE;
}

static void io_msg_tw_complete(struct callback_head *head)
{
	struct io_msg *msg = container_of(head, struct io_msg, tw);
	struct io_kiocb *req = cmd_to_io_kiocb(msg);
	struct io_ring_ctx *target_ctx = req->file->private_data;
	int ret = 0;

	if (current->flags & PF_EXITING) {
		ret = -EOWNERDEAD;
	} else {
		/*
		 * If the target ring is using IOPOLL mode, then we need to be
		 * holding the uring_lock for posting completions. Other ring
		 * types rely on the regular completion locking, which is
		 * handled while posting.
		 */
		if (target_ctx->flags & IORING_SETUP_IOPOLL)
			mutex_lock(&target_ctx->uring_lock);
		if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
			ret = -EOVERFLOW;
		if (target_ctx->flags & IORING_SETUP_IOPOLL)
			mutex_unlock(&target_ctx->uring_lock);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_queue_tw_complete(req, ret);
}

static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	int ret;

	/* data messages use neither of the fd fields and take no flags */
	if (msg->src_fd || msg->dst_fd || msg->flags)
		return -EINVAL;
	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
		return -EBADFD;

	if (io_msg_need_remote(target_ctx))
		return io_msg_exec_remote(req, io_msg_tw_complete);

	ret = -EOVERFLOW;
	if (target_ctx->flags & IORING_SETUP_IOPOLL) {
		if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
			return -EAGAIN;
		if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
			ret = 0;
		io_double_unlock_ctx(target_ctx);
	} else {
		if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
			ret = 0;
	}
	return ret;
}
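
/*
 * Userspace sketch (illustrative only; assumes liburing, a source ring
 * src_ring, and target_fd referring to another io_uring): post a CQE
 * carrying user_data 0xcafe and a 32-bit payload of 128 on the target.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&src_ring);
 *
 *	io_uring_prep_msg_ring(sqe, target_fd, 128, 0xcafe, 0);
 *	io_uring_submit(&src_ring);
 *
 * The target then reaps a CQE with cqe->user_data == 0xcafe and
 * cqe->res == 128; the sender reaps its own CQE with this op's result.
 */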

/*
 * Look up msg->src_fd in the source ring's fixed file table and take a
 * reference on the file, if one is installed at that index.
 */
static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = NULL;
	unsigned long file_ptr;
	int idx = msg->src_fd;

	io_ring_submit_lock(ctx, issue_flags);
	if (likely(idx < ctx->nr_user_files)) {
		idx = array_index_nospec(idx, ctx->nr_user_files);
		file_ptr = io_fixed_file_slot(&ctx->file_table, idx)->file_ptr;
		file = (struct file *) (file_ptr & FFS_MASK);
		if (file)
			get_file(file);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return file;
}

static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct file *src_file = msg->src_file;
	int ret;

	if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
		return -EAGAIN;

	ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
	if (ret < 0)
		goto out_unlock;

	msg->src_file = NULL;
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (msg->flags & IORING_MSG_RING_CQE_SKIP)
		goto out_unlock;
	/*
	 * If this fails, the target still received the file descriptor but
	 * wasn't notified of the fact. This means that if this request
	 * completes with -EOVERFLOW, then the sender must ensure that a
	 * later IORING_OP_MSG_RING delivers the message.
	 */
	if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
		ret = -EOVERFLOW;
out_unlock:
	io_double_unlock_ctx(target_ctx);
	return ret;
}

static void io_msg_tw_fd_complete(struct callback_head *head)
{
	struct io_msg *msg = container_of(head, struct io_msg, tw);
	struct io_kiocb *req = cmd_to_io_kiocb(msg);
	int ret = -EOWNERDEAD;

	if (!(current->flags & PF_EXITING))
		ret = io_msg_install_complete(req, IO_URING_F_UNLOCKED);
	if (ret < 0)
		req_set_fail(req);
	io_req_queue_tw_complete(req, ret);
}

static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *src_file = msg->src_file;

	if (target_ctx == ctx)
		return -EINVAL;
	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
		return -EBADFD;
	if (!src_file) {
		src_file = io_msg_grab_file(req, issue_flags);
		if (!src_file)
			return -EBADF;
		msg->src_file = src_file;
		req->flags |= REQ_F_NEED_CLEANUP;
	}

	if (io_msg_need_remote(target_ctx))
		return io_msg_exec_remote(req, io_msg_tw_fd_complete);
	return io_msg_install_complete(req, issue_flags);
}
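
/*
 * Userspace sketch (illustrative only; assumes a liburing recent enough to
 * provide io_uring_prep_msg_ring_fd(), and that the target ring has a
 * registered file table): copy the source ring's fixed file at index 3
 * into slot 0 of the target ring, suppressing the target-side CQE.
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&src_ring);
 *
 *	io_uring_prep_msg_ring_fd(sqe, target_fd, 3, 0, 0xf00d,
 *				  IORING_MSG_RING_CQE_SKIP);
 *	io_uring_submit(&src_ring);
 */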

int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (unlikely(sqe->buf_index || sqe->personality))
		return -EINVAL;

	msg->src_file = NULL;
	msg->user_data = READ_ONCE(sqe->off);
	msg->len = READ_ONCE(sqe->len);
	msg->cmd = READ_ONCE(sqe->addr);
	msg->src_fd = READ_ONCE(sqe->addr3);
	msg->dst_fd = READ_ONCE(sqe->file_index);
	msg->flags = READ_ONCE(sqe->msg_ring_flags);
	if (msg->flags & ~IORING_MSG_RING_CQE_SKIP)
		return -EINVAL;

	return 0;
}
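
/*
 * For reference, the raw SQE fields consumed above (liburing's prep
 * helpers fill these in):
 *
 *	sqe->opcode		= IORING_OP_MSG_RING;
 *	sqe->fd			= <target ring fd>;
 *	sqe->addr		= IORING_MSG_DATA or IORING_MSG_SEND_FD;
 *	sqe->off		= <user_data to post on the target>;
 *	sqe->len		= <32-bit payload, posted as cqe->res>;
 *	sqe->addr3		= <source fixed file index, SEND_FD only>;
 *	sqe->file_index		= <target slot + 1, or IORING_FILE_INDEX_ALLOC>;
 *	sqe->msg_ring_flags	= 0 or IORING_MSG_RING_CQE_SKIP;
 */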

int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	int ret;

	ret = -EBADFD;
	if (!io_is_uring_fops(req->file))
		goto done;

	switch (msg->cmd) {
	case IORING_MSG_DATA:
		ret = io_msg_ring_data(req, issue_flags);
		break;
	case IORING_MSG_SEND_FD:
		ret = io_msg_send_fd(req, issue_flags);
		break;
	default:
		ret = -EINVAL;
		break;
	}

done:
	if (ret < 0) {
		/* -EAGAIN punts to io-wq; SKIP_COMPLETE completes via task_work */
		if (ret == -EAGAIN || ret == IOU_ISSUE_SKIP_COMPLETE)
			return ret;
		req_set_fail(req);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}