// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"
#include "bpf_filter.h"

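/*
 * Set up io-wq offload for @task on behalf of @ctx: lazily allocate the
 * ring's shared io_wq_hash under uring_lock, then create an io-wq whose
 * concurrency is capped by the SQ size (and 4 * online CPUs).
 */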
static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
					struct task_struct *task)
{
	struct io_wq_hash *hash;
	struct io_wq_data data;
	unsigned int concurrency;

	mutex_lock(&ctx->uring_lock);
	hash = ctx->hash_map;
	if (!hash) {
		hash = kzalloc_obj(*hash);
		if (!hash) {
			mutex_unlock(&ctx->uring_lock);
			return ERR_PTR(-ENOMEM);
		}
		refcount_set(&hash->refs, 1);
		init_waitqueue_head(&hash->wait);
		ctx->hash_map = hash;
	}
	mutex_unlock(&ctx->uring_lock);

	data.hash = hash;
	data.task = task;

	/* Do QD, or 4 * CPUS, whichever is smaller */
	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

	return io_wq_create(concurrency, &data);
}

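/*
 * Final teardown of a task's io_uring state: warn about any tctx nodes or
 * io-wq that are unexpectedly still around, release the tctx itself, and
 * drop any io_uring restriction filters attached to the task.
 */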
void __io_uring_free(struct task_struct *tsk)
{
	struct io_uring_task *tctx = tsk->io_uring;
	struct io_tctx_node *node;
	unsigned long index;

	/*
	 * Fault injection forcing allocation errors in the xa_store() path
	 * can lead to xa_empty() returning false, even though no actual
	 * node is stored in the xarray. Until that gets sorted out, attempt
	 * an iteration here and warn if any entries are found.
	 */
	if (tctx) {
		xa_for_each(&tctx->xa, index, node) {
			WARN_ON_ONCE(1);
			break;
		}
		WARN_ON_ONCE(tctx->io_wq);
		WARN_ON_ONCE(tctx->cached_refs);

		percpu_counter_destroy(&tctx->inflight);
		kfree(tctx);
		tsk->io_uring = NULL;
	}
	if (tsk->io_uring_restrict) {
		io_put_bpf_filters(tsk->io_uring_restrict);
		kfree(tsk->io_uring_restrict);
		tsk->io_uring_restrict = NULL;
	}
}

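/*
 * Allocate and initialise the per-task io_uring context: the inflight
 * counter, the task's io-wq, and the xarray/task_work machinery used to
 * track rings and run deferred work for this task.
 */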
__cold struct io_uring_task *io_uring_alloc_task_context(struct task_struct *task,
							 struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx;
	int ret;

	tctx = kzalloc_obj(*tctx);
	if (unlikely(!tctx))
		return ERR_PTR(-ENOMEM);

	ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
	if (unlikely(ret)) {
		kfree(tctx);
		return ERR_PTR(ret);
	}

	tctx->io_wq = io_init_wq_offload(ctx, task);
	if (IS_ERR(tctx->io_wq)) {
		ret = PTR_ERR(tctx->io_wq);
		percpu_counter_destroy(&tctx->inflight);
		kfree(tctx);
		return ERR_PTR(ret);
	}

	tctx->task = task;
	xa_init(&tctx->xa);
	init_waitqueue_head(&tctx->wait);
	atomic_set(&tctx->in_cancel, 0);
	atomic_set(&tctx->inflight_tracked, 0);
	init_llist_head(&tctx->task_list);
	init_task_work(&tctx->task_work, tctx_task_work);
	return tctx;
}

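/*
 * Record that @tctx (i.e. the current task) uses @ctx: store a node in the
 * tctx xarray keyed by the ctx pointer and link it into the ctx's
 * tctx_list. Returns 0, without doing anything, if the ctx is already
 * installed.
 */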
static int io_tctx_install_node(struct io_ring_ctx *ctx,
				struct io_uring_task *tctx)
{
	struct io_tctx_node *node;
	int ret;

	if (xa_load(&tctx->xa, (unsigned long)ctx))
		return 0;

	node = kmalloc_obj(*node);
	if (!node)
		return -ENOMEM;
	node->ctx = ctx;
	node->task = current;

	ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
			      node, GFP_KERNEL));
	if (ret) {
		kfree(node);
		return ret;
	}

	mutex_lock(&ctx->tctx_lock);
	list_add(&node->ctx_node, &ctx->tctx_list);
	mutex_unlock(&ctx->tctx_lock);
	return 0;
}

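/*
 * Ensure the current task has a tctx with @ctx installed in it, allocating
 * the tctx on first use and applying any io-wq worker limits that were
 * registered on the ring before this task attached.
 */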
int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;
	int ret;

	if (unlikely(!tctx)) {
		tctx = io_uring_alloc_task_context(current, ctx);
		if (IS_ERR(tctx))
			return PTR_ERR(tctx);

		if (data_race(ctx->int_flags) & IO_RING_F_IOWQ_LIMITS_SET) {
			unsigned int limits[2];

			mutex_lock(&ctx->uring_lock);
			limits[0] = ctx->iowq_limits[0];
			limits[1] = ctx->iowq_limits[1];
			mutex_unlock(&ctx->uring_lock);

			ret = io_wq_max_workers(tctx->io_wq, limits);
			if (ret)
				goto err_free;
		}
	}

	/*
	 * Re-activate io-wq keepalive on any new io_uring usage. The wq may
	 * have been marked for idle-exit when the task temporarily had no
	 * active io_uring instances.
	 */
	if (tctx->io_wq)
		io_wq_set_exit_on_idle(tctx->io_wq, false);

	ret = io_tctx_install_node(ctx, tctx);
	if (!ret) {
		current->io_uring = tctx;
		return 0;
	}
	if (!current->io_uring) {
err_free:
		if (tctx->io_wq) {
			io_wq_exit_start(tctx->io_wq);
			io_wq_put_and_exit(tctx->io_wq);
		}
		percpu_counter_destroy(&tctx->inflight);
		kfree(tctx);
	}
	return ret;
}

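/*
 * Submission-path variant of the above: enforce IORING_SETUP_SINGLE_ISSUER
 * and remember @ctx as the task's most recently used ring.
 */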
int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx)
{
	int ret;

	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
	    && ctx->submitter_task != current)
		return -EEXIST;

	ret = __io_uring_add_tctx_node(ctx);
	if (ret)
		return ret;

	current->io_uring->last = ctx;
	return 0;
}

/*
 * Remove this io_uring_file -> task mapping.
 */
__cold void io_uring_del_tctx_node(unsigned long index)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;

	if (!tctx)
		return;
	node = xa_erase(&tctx->xa, index);
	if (!node)
		return;

	WARN_ON_ONCE(current != node->task);
	WARN_ON_ONCE(list_empty(&node->ctx_node));

	mutex_lock(&node->ctx->tctx_lock);
	list_del(&node->ctx_node);
	mutex_unlock(&node->ctx->tctx_lock);

	if (tctx->last == node->ctx)
		tctx->last = NULL;
	kfree(node);

	if (xa_empty(&tctx->xa) && tctx->io_wq)
		io_wq_set_exit_on_idle(tctx->io_wq, true);
}

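/*
 * Drop every ctx node tracked in @tctx and shut down its io-wq.
 */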
__cold void io_uring_clean_tctx(struct io_uring_task *tctx)
{
	struct io_wq *wq = tctx->io_wq;
	struct io_tctx_node *node;
	unsigned long index;

	xa_for_each(&tctx->xa, index, node) {
		io_uring_del_tctx_node(index);
		cond_resched();
	}
	if (wq) {
		/*
		 * Must be after io_uring_del_tctx_node() (which removes the
		 * nodes under tctx_lock) to avoid racing with
		 * io_uring_try_cancel_iowq().
		 */
		io_wq_put_and_exit(wq);
		tctx->io_wq = NULL;
	}
}

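/*
 * Drop all ring files the current task has registered for registered-fd
 * based io_uring_enter() usage.
 */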
void io_uring_unreg_ringfd(void)
{
	struct io_uring_task *tctx = current->io_uring;
	int i;

	for (i = 0; i < IO_RINGFD_REG_MAX; i++) {
		if (tctx->registered_rings[i]) {
			fput(tctx->registered_rings[i]);
			tctx->registered_rings[i] = NULL;
		}
	}
}

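/*
 * Place @file in the first free registered-ring slot in [start, end) and
 * return its index, or -EBUSY if all slots in that range are taken.
 */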
int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end)
{
	int offset, idx;

	for (offset = start; offset < end; offset++) {
		idx = array_index_nospec(offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[idx])
			continue;

		tctx->registered_rings[idx] = file;
		return idx;
	}
	return -EBUSY;
}

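/*
 * Resolve @fd, verify it refers to an io_uring file, and register it in the
 * task's ring table. The file reference is dropped again on failure.
 */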
static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
				     int start, int end)
{
	struct file *file;
	int offset;

	file = fget(fd);
	if (!file) {
		return -EBADF;
	} else if (!io_is_uring_fops(file)) {
		fput(file);
		return -EOPNOTSUPP;
	}
	offset = io_ring_add_registered_file(tctx, file, start, end);
	if (offset < 0)
		fput(file);
	return offset;
}

/*
 * Register a ring fd to avoid fdget/fdput for each io_uring_enter()
 * invocation. User passes in an array of struct io_uring_rsrc_update
 * with ->data set to the ring_fd, and ->offset given for the desired
 * index. If no index is desired, application may set ->offset == -1U
 * and we'll find an available index. Returns number of entries
 * successfully processed, or < 0 on error if none were processed.
 */
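/*
 * Illustrative userspace sketch (not part of this file, only a rough
 * guide): register a ring fd via the raw register syscall and let the
 * kernel pick a free slot with ->offset == -1U:
 *
 *	struct io_uring_rsrc_update upd = {
 *		.offset	= -1U,
 *		.data	= ring_fd,
 *	};
 *	int ret = syscall(__NR_io_uring_register, ring_fd,
 *			  IORING_REGISTER_RING_FDS, &upd, 1);
 *
 * On success ret is 1 and upd.offset holds the registered index, which can
 * then be used with io_uring_enter() and IORING_ENTER_REGISTERED_RING
 * instead of the raw ring fd.
 */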
int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
		       unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_rsrc_update reg;
	struct io_uring_task *tctx;
	int ret, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;

	mutex_unlock(&ctx->uring_lock);
	ret = __io_uring_add_tctx_node(ctx);
	mutex_lock(&ctx->uring_lock);
	if (ret)
		return ret;

	tctx = current->io_uring;
	for (i = 0; i < nr_args; i++) {
		int start, end;

		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}

		if (reg.resv) {
			ret = -EINVAL;
			break;
		}

		if (reg.offset == -1U) {
			start = 0;
			end = IO_RINGFD_REG_MAX;
		} else {
			if (reg.offset >= IO_RINGFD_REG_MAX) {
				ret = -EINVAL;
				break;
			}
			start = reg.offset;
			end = start + 1;
		}

		ret = io_ring_add_registered_fd(tctx, reg.data, start, end);
		if (ret < 0)
			break;

		reg.offset = ret;
		if (copy_to_user(&arg[i], &reg, sizeof(reg))) {
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
			ret = -EFAULT;
			break;
		}
	}

	return i ? i : ret;
}

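/*
 * Undo io_ringfd_register(): for each entry, ->offset names the slot to
 * drop and ->data/->resv must be zero. Returns the number of entries
 * processed, or < 0 on error if none were.
 */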
int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
			 unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_task *tctx = current->io_uring;
	struct io_uring_rsrc_update reg;
	int ret = 0, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;
	if (!tctx)
		return 0;

	for (i = 0; i < nr_args; i++) {
		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}
		if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) {
			ret = -EINVAL;
			break;
		}

		reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[reg.offset]) {
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
		}
	}

	return i ? i : ret;
}

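/*
 * Give a newly forked task its own copy of the io_uring restrictions it
 * inherited from its parent, so the two tasks never share (or double-free)
 * the same io_restriction object.
 */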
int __io_uring_fork(struct task_struct *tsk)
{
	struct io_restriction *res, *src = tsk->io_uring_restrict;

	/* Don't leave it dangling on error */
	tsk->io_uring_restrict = NULL;

	res = kzalloc_obj(*res, GFP_KERNEL_ACCOUNT);
	if (!res)
		return -ENOMEM;

	tsk->io_uring_restrict = res;
	io_restriction_clone(res, src);
	return 0;
}
