/* linux/io_uring/msg_ring.c (revision 497e6b37b0099dc415578488287fd84fb74433eb) */
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "filetable.h"
#include "msg_ring.h"

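/*
 * State for IORING_OP_MSG_RING: lets one io_uring instance send a message
 * to another ring, identified by that ring's file descriptor. The message
 * is either a plain CQE (IORING_MSG_DATA) or a fixed file descriptor
 * (IORING_MSG_SEND_FD). This struct overlays the per-op area of struct
 * io_kiocb via io_kiocb_to_cmd().
 */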
struct io_msg {
	struct file			*file;
	struct file			*src_file;
	struct callback_head		tw;
	u64				user_data;
	u32				len;
	u32				cmd;
	u32				src_fd;
	u32				dst_fd;
	u32				flags;
};

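/*
 * Cleanup hook, run if the request goes away after io_msg_grab_file()
 * took a reference on the source file but before the install consumed it
 * (REQ_F_NEED_CLEANUP still set): drop that reference.
 */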
void io_msg_ring_cleanup(struct io_kiocb *req)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (WARN_ON_ONCE(!msg->src_file))
		return;

	fput(msg->src_file);
	msg->src_file = NULL;
}

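/*
 * Task-work callback for IORING_MSG_DATA when the target ring requires
 * completions to be posted from its own submitter task (task_complete
 * set, e.g. rings created with IORING_SETUP_DEFER_TASKRUN): post the CQE
 * from that task, unless it is already exiting.
 */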
static void io_msg_tw_complete(struct callback_head *head)
{
	struct io_msg *msg = container_of(head, struct io_msg, tw);
	struct io_kiocb *req = cmd_to_io_kiocb(msg);
	struct io_ring_ctx *target_ctx = req->file->private_data;
	int ret = 0;

	if (current->flags & PF_EXITING)
		ret = -EOWNERDEAD;
	else if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
		ret = -EOVERFLOW;

	if (ret < 0)
		req_set_fail(req);
	io_req_queue_tw_complete(req, ret);
}

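/*
 * IORING_MSG_DATA: post a CQE carrying the caller-chosen user_data and
 * len into the target ring. If the CQE must come from the target's
 * submitter task, punt to task_work there and complete this request
 * later.
 */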
static int io_msg_ring_data(struct io_kiocb *req)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (msg->src_fd || msg->dst_fd || msg->flags)
		return -EINVAL;

	if (target_ctx->task_complete && current != target_ctx->submitter_task) {
		init_task_work(&msg->tw, io_msg_tw_complete);
		if (task_work_add(target_ctx->submitter_task, &msg->tw,
				  TWA_SIGNAL_NO_IPI))
			return -EOWNERDEAD;

		atomic_or(IORING_SQ_TASKRUN, &target_ctx->rings->sq_flags);
		return IOU_ISSUE_SKIP_COMPLETE;
	}

	if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
		return 0;

	return -EOVERFLOW;
}

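/*
 * Pairs with io_double_lock_ctx() below; issue_flags is unused here but
 * keeps the two signatures symmetric.
 */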
static void io_double_unlock_ctx(struct io_ring_ctx *octx,
				 unsigned int issue_flags)
{
	mutex_unlock(&octx->uring_lock);
}

static int io_double_lock_ctx(struct io_ring_ctx *octx,
			      unsigned int issue_flags)
{
	/*
	 * To ensure proper ordering between the two ctxs, we can only
	 * attempt a trylock on the target. If that fails and we already have
	 * the source ctx lock, punt to io-wq.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		if (!mutex_trylock(&octx->uring_lock))
			return -EAGAIN;
		return 0;
	}
	mutex_lock(&octx->uring_lock);
	return 0;
}

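/*
 * Resolve msg->src_fd in the sender's fixed file table and take a
 * reference on the file. Returns NULL for an out-of-range or empty slot;
 * array_index_nospec() clamps the index against speculative
 * out-of-bounds access.
 */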
static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = NULL;
	unsigned long file_ptr;
	int idx = msg->src_fd;

	io_ring_submit_lock(ctx, issue_flags);
	if (likely(idx < ctx->nr_user_files)) {
		idx = array_index_nospec(idx, ctx->nr_user_files);
		file_ptr = io_fixed_file_slot(&ctx->file_table, idx)->file_ptr;
		file = (struct file *) (file_ptr & FFS_MASK);
		if (file)
			get_file(file);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return file;
}

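/*
 * Install the previously grabbed source file into the target's fixed
 * file table at msg->dst_fd, then post a CQE to the target unless
 * IORING_MSG_RING_CQE_SKIP was requested. Needs the target ctx lock;
 * -EAGAIN means the trylock failed and the caller should retry from
 * io-wq, where blocking on the mutex is allowed.
 */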
static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct file *src_file = msg->src_file;
	int ret;

	if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
		return -EAGAIN;

	ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
	if (ret < 0)
		goto out_unlock;

	msg->src_file = NULL;
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (msg->flags & IORING_MSG_RING_CQE_SKIP)
		goto out_unlock;
	/*
	 * If this fails, the target still received the file descriptor but
	 * wasn't notified of the fact. This means that if this request
	 * completes with -EOVERFLOW, then the sender must ensure that a
	 * later IORING_OP_MSG_RING delivers the message.
	 */
	if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0))
		ret = -EOVERFLOW;
out_unlock:
	io_double_unlock_ctx(target_ctx, issue_flags);
	return ret;
}

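/*
 * Task-work variant of the fd install, run on the target ring's
 * submitter task. IO_URING_F_UNLOCKED is passed because the source
 * ring's uring_lock is not held in this context, so io_double_lock_ctx()
 * may block instead of trylocking.
 */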
static void io_msg_tw_fd_complete(struct callback_head *head)
{
	struct io_msg *msg = container_of(head, struct io_msg, tw);
	struct io_kiocb *req = cmd_to_io_kiocb(msg);
	int ret = -EOWNERDEAD;

	if (!(current->flags & PF_EXITING))
		ret = io_msg_install_complete(req, IO_URING_F_UNLOCKED);
	if (ret < 0)
		req_set_fail(req);
	io_req_queue_tw_complete(req, ret);
}

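/*
 * IORING_MSG_SEND_FD: hand a fixed file from the source ring to the
 * target ring. The file is grabbed once and cached in msg->src_file
 * (with REQ_F_NEED_CLEANUP set) so that a retry after -EAGAIN does not
 * take a second reference.
 */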
static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *src_file = msg->src_file;

	if (target_ctx == ctx)
		return -EINVAL;
	if (!src_file) {
		src_file = io_msg_grab_file(req, issue_flags);
		if (!src_file)
			return -EBADF;
		msg->src_file = src_file;
		req->flags |= REQ_F_NEED_CLEANUP;
	}

	if (target_ctx->task_complete && current != target_ctx->submitter_task) {
		init_task_work(&msg->tw, io_msg_tw_fd_complete);
		if (task_work_add(target_ctx->submitter_task, &msg->tw,
				  TWA_SIGNAL))
			return -EOWNERDEAD;

		return IOU_ISSUE_SKIP_COMPLETE;
	}
	return io_msg_install_complete(req, issue_flags);
}

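/*
 * Prep: decode the MSG_RING SQE. The generic SQE fields are repurposed:
 *
 *	sqe->off		-> user_data posted in the target CQE
 *	sqe->len		-> len posted in the target CQE
 *	sqe->addr		-> cmd (IORING_MSG_DATA / IORING_MSG_SEND_FD)
 *	sqe->addr3		-> src_fd (fixed file slot in the sender)
 *	sqe->file_index		-> dst_fd (fixed file slot in the target)
 *	sqe->msg_ring_flags	-> flags (only IORING_MSG_RING_CQE_SKIP)
 */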
int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (unlikely(sqe->buf_index || sqe->personality))
		return -EINVAL;

	msg->src_file = NULL;
	msg->user_data = READ_ONCE(sqe->off);
	msg->len = READ_ONCE(sqe->len);
	msg->cmd = READ_ONCE(sqe->addr);
	msg->src_fd = READ_ONCE(sqe->addr3);
	msg->dst_fd = READ_ONCE(sqe->file_index);
	msg->flags = READ_ONCE(sqe->msg_ring_flags);
	if (msg->flags & ~IORING_MSG_RING_CQE_SKIP)
		return -EINVAL;

	return 0;
}

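/*
 * Issue entry point: validate that req->file really is an io_uring file,
 * then dispatch on msg->cmd. -EAGAIN and IOU_ISSUE_SKIP_COMPLETE are
 * passed straight back to the core so the request can be retried or
 * finished later from task_work.
 */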
int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	int ret;

	ret = -EBADFD;
	if (!io_is_uring_fops(req->file))
		goto done;

	switch (msg->cmd) {
	case IORING_MSG_DATA:
		ret = io_msg_ring_data(req);
		break;
	case IORING_MSG_SEND_FD:
		ret = io_msg_send_fd(req, issue_flags);
		break;
	default:
		ret = -EINVAL;
		break;
	}

done:
	if (ret < 0) {
		if (ret == -EAGAIN || ret == IOU_ISSUE_SKIP_COMPLETE)
			return ret;
		req_set_fail(req);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
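
/*
 * Illustrative userspace sketch (not part of this file): posting a
 * message CQE into another ring with liburing's io_uring_prep_msg_ring()
 * helper (assumed available since liburing 2.2). Ring setup and error
 * handling are elided; msg_other_ring() is a hypothetical name.
 *
 *	#include <liburing.h>
 *
 *	static int msg_other_ring(struct io_uring *src, int target_ring_fd)
 *	{
 *		struct io_uring_sqe *sqe = io_uring_get_sqe(src);
 *
 *		if (!sqe)
 *			return -EBUSY;
 *		// target sees cqe->user_data == 0xcafe, cqe->res == 0
 *		io_uring_prep_msg_ring(sqe, target_ring_fd, 0, 0xcafe, 0);
 *		return io_uring_submit(src);
 *	}
 *
 * The receiver reaps the CQE from its own ring as usual; user_data and
 * res carry the values the sender chose above.
 */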