// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "filetable.h"
#include "msg_ring.h"

/*
 * Per-request state for IORING_OP_MSG_RING, overlaid on the request's
 * per-opcode storage via io_kiocb_to_cmd().
 */
struct io_msg {
	struct file *file;	/* unused here; NOTE(review): presumably kept first to match the io_uring per-opcode struct layout — confirm */
	u64 user_data;		/* user_data to post in the target ring's CQE */
	u32 len;		/* res value to post in the target ring's CQE */
	u32 cmd;		/* IORING_MSG_DATA or IORING_MSG_SEND_FD */
	u32 src_fd;		/* fixed-file index in the source ring (SEND_FD only) */
	u32 dst_fd;		/* fixed-file slot in the target ring (SEND_FD only) */
	u32 flags;		/* IORING_MSG_RING_* flags from the SQE */
};

/*
 * IORING_MSG_DATA: post a CQE carrying the caller-supplied user_data/len
 * on the target ring (req->file). Returns 0 on success, -EOVERFLOW if the
 * CQE could not be posted, -EINVAL if fd/flag fields were set — they are
 * meaningless for a plain data message.
 */
static int io_msg_ring_data(struct io_kiocb *req)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (msg->src_fd || msg->dst_fd || msg->flags)
		return -EINVAL;

	if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
		return 0;

	return -EOVERFLOW;
}

/*
 * Undo io_double_lock_ctx(): the source ring's lock (ctx) is only ours to
 * drop if the issue context came in unlocked and we took it ourselves;
 * the target's lock (octx) is always ours.
 */
static void io_double_unlock_ctx(struct io_ring_ctx *ctx,
				 struct io_ring_ctx *octx,
				 unsigned int issue_flags)
{
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
	mutex_unlock(&octx->uring_lock);
}

/*
 * Acquire both rings' uring_locks. Returns 0 with the lock state expected
 * by io_double_unlock_ctx(), or -EAGAIN if the caller must retry from
 * io-wq context (where IO_URING_F_UNLOCKED is set).
 */
static int io_double_lock_ctx(struct io_ring_ctx *ctx,
			      struct io_ring_ctx *octx,
			      unsigned int issue_flags)
{
	/*
	 * To ensure proper ordering between the two ctxs, we can only
	 * attempt a trylock on the target. If that fails and we already have
	 * the source ctx lock, punt to io-wq.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		if (!mutex_trylock(&octx->uring_lock))
			return -EAGAIN;
		return 0;
	}

	/* Always grab smallest value ctx first. We know ctx != octx. */
	if (ctx < octx) {
		mutex_lock(&ctx->uring_lock);
		mutex_lock(&octx->uring_lock);
	} else {
		mutex_lock(&octx->uring_lock);
		mutex_lock(&ctx->uring_lock);
	}

	return 0;
}

/*
 * IORING_MSG_SEND_FD: install a file from the source ring's fixed-file
 * table into a fixed slot in the target ring's table, then (unless
 * IORING_MSG_RING_CQE_SKIP is set) notify the target with a CQE.
 */
static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long file_ptr;
	struct file *src_file;
	int ret;

	/* io_double_lock_ctx() relies on ctx != octx; same-ring send is invalid */
	if (target_ctx == ctx)
		return -EINVAL;

	ret = io_double_lock_ctx(ctx, target_ctx, issue_flags);
	if (unlikely(ret))
		return ret;

	ret = -EBADF;
	if (unlikely(msg->src_fd >= ctx->nr_user_files))
		goto out_unlock;

	/* clamp the (userspace-controlled) index against speculative OOB access */
	msg->src_fd = array_index_nospec(msg->src_fd, ctx->nr_user_files);
	file_ptr = io_fixed_file_slot(&ctx->file_table, msg->src_fd)->file_ptr;
	if (!file_ptr)
		goto out_unlock;

	/* low bits of file_ptr carry flag state; FFS_MASK recovers the pointer */
	src_file = (struct file *) (file_ptr & FFS_MASK);
	get_file(src_file);

	/* hands our reference to the target table; drop it if install fails */
	ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
	if (ret < 0) {
		fput(src_file);
		goto out_unlock;
	}

	if (msg->flags & IORING_MSG_RING_CQE_SKIP)
		goto out_unlock;

	/*
	 * If this fails, the target still received the file descriptor but
	 * wasn't notified of the fact. This means that if this request
	 * completes with -EOVERFLOW, then the sender must ensure that a
	 * later IORING_OP_MSG_RING delivers the message.
	 */
	if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))
		ret = -EOVERFLOW;
out_unlock:
	io_double_unlock_ctx(ctx, target_ctx, issue_flags);
	return ret;
}

/*
 * Parse an IORING_OP_MSG_RING SQE into struct io_msg. Fields are read
 * with READ_ONCE() since the SQE lives in memory shared with userspace.
 * Rejects unused SQE fields and unknown flags (only CQE_SKIP is known).
 */
int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (unlikely(sqe->buf_index || sqe->personality))
		return -EINVAL;

	msg->user_data = READ_ONCE(sqe->off);
	msg->len = READ_ONCE(sqe->len);
	msg->cmd = READ_ONCE(sqe->addr);
	msg->src_fd = READ_ONCE(sqe->addr3);
	msg->dst_fd = READ_ONCE(sqe->file_index);
	msg->flags = READ_ONCE(sqe->msg_ring_flags);
	if (msg->flags & ~IORING_MSG_RING_CQE_SKIP)
		return -EINVAL;

	return 0;
}

/*
 * Issue an IORING_OP_MSG_RING request: verify req->file really is an
 * io_uring instance, then dispatch on msg->cmd. Always completes inline
 * (IOU_OK); failure is reported through the request's CQE res.
 */
int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	int ret;

	ret = -EBADFD;
	if (!io_is_uring_fops(req->file))
		goto done;

	switch (msg->cmd) {
	case IORING_MSG_DATA:
		ret = io_msg_ring_data(req);
		break;
	case IORING_MSG_SEND_FD:
		ret = io_msg_send_fd(req, issue_flags);
		break;
	default:
		ret = -EINVAL;
		break;
	}

done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	/* put file to avoid an attempt to IOPOLL the req */
	if (!(req->flags & REQ_F_FIXED_FILE))
		io_put_file(req->file);
	req->file = NULL;
	return IOU_OK;
}