// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"

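/*
 * Create the per-task io-wq that offloaded requests run on. The hash
 * map is allocated lazily and shared by all tasks attached to this
 * ring, so hashed work (e.g. buffered writes against the same file)
 * stays serialized across their io-wq instances.
 */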
static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
					struct task_struct *task)
{
	struct io_wq_hash *hash;
	struct io_wq_data data;
	unsigned int concurrency;

	mutex_lock(&ctx->uring_lock);
	hash = ctx->hash_map;
	if (!hash) {
		hash = kzalloc(sizeof(*hash), GFP_KERNEL);
		if (!hash) {
			mutex_unlock(&ctx->uring_lock);
			return ERR_PTR(-ENOMEM);
		}
		refcount_set(&hash->refs, 1);
		init_waitqueue_head(&hash->wait);
		ctx->hash_map = hash;
	}
	mutex_unlock(&ctx->uring_lock);

	data.hash = hash;
	data.task = task;
	data.free_work = io_wq_free_work;
	data.do_work = io_wq_submit_work;
	/* Use the SQ depth, or 4 * number of online CPUs, whichever is smaller */
	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

	return io_wq_create(concurrency, &data);
}

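/*
 * Called when the task is being freed; the tctx must be quiesced by
 * then, so anything still attached (nodes, io-wq, cached refs) is a
 * leak that the WARNs below flag.
 */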
void __io_uring_free(struct task_struct *tsk)
{
	struct io_uring_task *tctx = tsk->io_uring;
	struct io_tctx_node *node;
	unsigned long index;

	/*
	 * Fault injection forcing allocation errors in the xa_store() path
	 * can lead to xa_empty() returning false, even though no actual
	 * node is stored in the xarray. Until that gets sorted out, attempt
	 * an iteration here and warn if any entries are found.
	 */
	xa_for_each(&tctx->xa, index, node) {
		WARN_ON_ONCE(1);
		break;
	}
	WARN_ON_ONCE(tctx->io_wq);
	WARN_ON_ONCE(tctx->cached_refs);

	percpu_counter_destroy(&tctx->inflight);
	kfree(tctx);
	tsk->io_uring = NULL;
}

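/*
 * Set up the per-task io_uring state on first use: the inflight
 * counter, the task-work list, and a dedicated io-wq for this task.
 */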
__cold int io_uring_alloc_task_context(struct task_struct *task,
				       struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx;
	int ret;

	tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
	if (unlikely(!tctx))
		return -ENOMEM;

	ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
	if (unlikely(ret)) {
		kfree(tctx);
		return ret;
	}

	tctx->io_wq = io_init_wq_offload(ctx, task);
	if (IS_ERR(tctx->io_wq)) {
		ret = PTR_ERR(tctx->io_wq);
		percpu_counter_destroy(&tctx->inflight);
		kfree(tctx);
		return ret;
	}

	tctx->task = task;
	xa_init(&tctx->xa);
	init_waitqueue_head(&tctx->wait);
	atomic_set(&tctx->in_cancel, 0);
	atomic_set(&tctx->inflight_tracked, 0);
	task->io_uring = tctx;
	init_llist_head(&tctx->task_list);
	init_task_work(&tctx->task_work, tctx_task_work);
	return 0;
}

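/*
 * Attach the current task to @ctx: allocate a tctx on first use
 * (propagating any io-wq worker limits already registered on the
 * ring), then link task and ring via an io_tctx_node stored in both
 * the tctx xarray and the ring's tctx_list.
 */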
int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;
	int ret;

	if (unlikely(!tctx)) {
		ret = io_uring_alloc_task_context(current, ctx);
		if (unlikely(ret))
			return ret;

		tctx = current->io_uring;
		if (ctx->iowq_limits_set) {
			unsigned int limits[2] = { ctx->iowq_limits[0],
						   ctx->iowq_limits[1], };

			ret = io_wq_max_workers(tctx->io_wq, limits);
			if (ret)
				return ret;
		}
	}
	if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;
		node->ctx = ctx;
		node->task = current;

		ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
				      node, GFP_KERNEL));
		if (ret) {
			kfree(node);
			return ret;
		}

		mutex_lock(&ctx->uring_lock);
		list_add(&node->ctx_node, &ctx->tctx_list);
		mutex_unlock(&ctx->uring_lock);
	}
	return 0;
}

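/*
 * Submission-side variant of the above: IORING_SETUP_SINGLE_ISSUER
 * rings may only be attached to by the designated submitter task, and
 * the ring is cached in tctx->last so repeat submitters can skip the
 * xarray lookup.
 */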
int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx)
{
	int ret;

	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
	    && ctx->submitter_task != current)
		return -EEXIST;

	ret = __io_uring_add_tctx_node(ctx);
	if (ret)
		return ret;

	current->io_uring->last = ctx;
	return 0;
}

/*
 * Remove this io_uring_file -> task mapping.
 */
__cold void io_uring_del_tctx_node(unsigned long index)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;

	if (!tctx)
		return;
	node = xa_erase(&tctx->xa, index);
	if (!node)
		return;

	WARN_ON_ONCE(current != node->task);
	WARN_ON_ONCE(list_empty(&node->ctx_node));

	mutex_lock(&node->ctx->uring_lock);
	list_del(&node->ctx_node);
	mutex_unlock(&node->ctx->uring_lock);

	if (tctx->last == node->ctx)
		tctx->last = NULL;
	kfree(node);
}

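/*
 * Detach the task from every ring it is attached to and shut down its
 * io-wq; used on the cancel/exit paths once the task is done with
 * io_uring.
 */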
__cold void io_uring_clean_tctx(struct io_uring_task *tctx)
{
	struct io_wq *wq = tctx->io_wq;
	struct io_tctx_node *node;
	unsigned long index;

	xa_for_each(&tctx->xa, index, node) {
		io_uring_del_tctx_node(index);
		cond_resched();
	}
	if (wq) {
		/*
		 * Must be after io_uring_del_tctx_node() (removes nodes under
		 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
		 */
		io_wq_put_and_exit(wq);
		tctx->io_wq = NULL;
	}
}

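/*
 * Drop every ring fd the current task has registered via
 * io_ringfd_register() below.
 */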
void io_uring_unreg_ringfd(void)
{
	struct io_uring_task *tctx = current->io_uring;
	int i;

	for (i = 0; i < IO_RINGFD_REG_MAX; i++) {
		if (tctx->registered_rings[i]) {
			fput(tctx->registered_rings[i]);
			tctx->registered_rings[i] = NULL;
		}
	}
}

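/*
 * Install @file in the first free registered-ring slot in [start, end),
 * returning the slot index or -EBUSY if the range is full. The
 * array_index_nospec() clamp keeps a mispredicted bounds check from
 * speculatively indexing past the table.
 */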
int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end)
{
	int offset;

	for (offset = start; offset < end; offset++) {
		offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[offset])
			continue;

		tctx->registered_rings[offset] = file;
		return offset;
	}
	return -EBUSY;
}

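/*
 * Resolve @fd and verify it refers to an io_uring instance before
 * installing it; the file reference taken here is kept on success and
 * dropped on any failure.
 */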
static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
				     int start, int end)
{
	struct file *file;
	int offset;

	file = fget(fd);
	if (!file) {
		return -EBADF;
	} else if (!io_is_uring_fops(file)) {
		fput(file);
		return -EOPNOTSUPP;
	}
	offset = io_ring_add_registered_file(tctx, file, start, end);
	if (offset < 0)
		fput(file);
	return offset;
}

/*
 * Register a ring fd to avoid fdget/fdput for each io_uring_enter()
 * invocation. The user passes in an array of struct io_uring_rsrc_update
 * with ->data set to the ring_fd, and ->offset given for the desired
 * index. If no index is desired, the application may set ->offset == -1U
 * and we'll find an available index. Returns the number of entries
 * successfully processed, or < 0 on error if none were processed.
 */
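
/*
 * Illustrative userspace sketch (not part of this file, and assuming a
 * thin wrapper around the io_uring_register(2) syscall): register one
 * ring fd at a kernel-chosen slot:
 *
 *	struct io_uring_rsrc_update reg = {
 *		.offset	= -1U,		// any free slot
 *		.data	= ring_fd,
 *	};
 *
 *	ret = io_uring_register(ring_fd, IORING_REGISTER_RING_FDS, &reg, 1);
 *	// on success reg.offset holds the slot index; pass it to
 *	// io_uring_enter() along with IORING_ENTER_REGISTERED_RING
 */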
int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
		       unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_rsrc_update reg;
	struct io_uring_task *tctx;
	int ret, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;

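	/*
	 * We are entered with uring_lock held, but __io_uring_add_tctx_node()
	 * takes it too when linking into ctx->tctx_list, so drop it across
	 * the call.
	 */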
	mutex_unlock(&ctx->uring_lock);
	ret = __io_uring_add_tctx_node(ctx);
	mutex_lock(&ctx->uring_lock);
	if (ret)
		return ret;

	tctx = current->io_uring;
	for (i = 0; i < nr_args; i++) {
		int start, end;

		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}

		if (reg.resv) {
			ret = -EINVAL;
			break;
		}

		if (reg.offset == -1U) {
			start = 0;
			end = IO_RINGFD_REG_MAX;
		} else {
			if (reg.offset >= IO_RINGFD_REG_MAX) {
				ret = -EINVAL;
				break;
			}
			start = reg.offset;
			end = start + 1;
		}

		ret = io_ring_add_registered_fd(tctx, reg.data, start, end);
		if (ret < 0)
			break;

		reg.offset = ret;
		if (copy_to_user(&arg[i], &reg, sizeof(reg))) {
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
			ret = -EFAULT;
			break;
		}
	}

	return i ? i : ret;
}

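/*
 * Undo io_ringfd_register(): drop the file registered at each given
 * ->offset. Unregistering an unused slot is not an error. Returns the
 * number of entries processed, or < 0 if none were.
 */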
int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
			 unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_task *tctx = current->io_uring;
	struct io_uring_rsrc_update reg;
	int ret = 0, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;
	if (!tctx)
		return 0;

	for (i = 0; i < nr_args; i++) {
		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}
		if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) {
			ret = -EINVAL;
			break;
		}

		reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[reg.offset]) {
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
		}
	}

	return i ? i : ret;
}