Lines Matching +full:reg +full:- +full:offset
1 // SPDX-License-Identifier: GPL-2.0
22 mutex_lock(&ctx->uring_lock); in io_init_wq_offload()
23 hash = ctx->hash_map; in io_init_wq_offload()
27 mutex_unlock(&ctx->uring_lock); in io_init_wq_offload()
28 return ERR_PTR(-ENOMEM); in io_init_wq_offload()
30 refcount_set(&hash->refs, 1); in io_init_wq_offload()
31 init_waitqueue_head(&hash->wait); in io_init_wq_offload()
32 ctx->hash_map = hash; in io_init_wq_offload()
34 mutex_unlock(&ctx->uring_lock); in io_init_wq_offload()
42 concurrency = min(ctx->sq_entries, 4 * num_online_cpus()); in io_init_wq_offload()
49 struct io_uring_task *tctx = tsk->io_uring; in __io_uring_free()
51 WARN_ON_ONCE(!xa_empty(&tctx->xa)); in __io_uring_free()
52 WARN_ON_ONCE(tctx->io_wq); in __io_uring_free()
53 WARN_ON_ONCE(tctx->cached_refs); in __io_uring_free()
55 percpu_counter_destroy(&tctx->inflight); in __io_uring_free()
57 tsk->io_uring = NULL; in __io_uring_free()
68 return -ENOMEM; in io_uring_alloc_task_context()
70 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL); in io_uring_alloc_task_context()
76 tctx->io_wq = io_init_wq_offload(ctx, task); in io_uring_alloc_task_context()
77 if (IS_ERR(tctx->io_wq)) { in io_uring_alloc_task_context()
78 ret = PTR_ERR(tctx->io_wq); in io_uring_alloc_task_context()
79 percpu_counter_destroy(&tctx->inflight); in io_uring_alloc_task_context()
84 xa_init(&tctx->xa); in io_uring_alloc_task_context()
85 init_waitqueue_head(&tctx->wait); in io_uring_alloc_task_context()
86 atomic_set(&tctx->in_cancel, 0); in io_uring_alloc_task_context()
87 atomic_set(&tctx->inflight_tracked, 0); in io_uring_alloc_task_context()
88 task->io_uring = tctx; in io_uring_alloc_task_context()
89 init_llist_head(&tctx->task_list); in io_uring_alloc_task_context()
90 init_task_work(&tctx->task_work, tctx_task_work); in io_uring_alloc_task_context()
96 struct io_uring_task *tctx = current->io_uring; in __io_uring_add_tctx_node()
105 tctx = current->io_uring; in __io_uring_add_tctx_node()
106 if (ctx->iowq_limits_set) { in __io_uring_add_tctx_node()
107 unsigned int limits[2] = { ctx->iowq_limits[0], in __io_uring_add_tctx_node()
108 ctx->iowq_limits[1], }; in __io_uring_add_tctx_node()
110 ret = io_wq_max_workers(tctx->io_wq, limits); in __io_uring_add_tctx_node()
115 if (!xa_load(&tctx->xa, (unsigned long)ctx)) { in __io_uring_add_tctx_node()
118 return -ENOMEM; in __io_uring_add_tctx_node()
119 node->ctx = ctx; in __io_uring_add_tctx_node()
120 node->task = current; in __io_uring_add_tctx_node()
122 ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx, in __io_uring_add_tctx_node()
129 mutex_lock(&ctx->uring_lock); in __io_uring_add_tctx_node()
130 list_add(&node->ctx_node, &ctx->tctx_list); in __io_uring_add_tctx_node()
131 mutex_unlock(&ctx->uring_lock); in __io_uring_add_tctx_node()
140 if (ctx->flags & IORING_SETUP_SINGLE_ISSUER in __io_uring_add_tctx_node_from_submit()
141 && ctx->submitter_task != current) in __io_uring_add_tctx_node_from_submit()
142 return -EEXIST; in __io_uring_add_tctx_node_from_submit()
148 current->io_uring->last = ctx; in __io_uring_add_tctx_node_from_submit()
153 * Remove this io_uring_file -> task mapping.
157 struct io_uring_task *tctx = current->io_uring; in io_uring_del_tctx_node()
162 node = xa_erase(&tctx->xa, index); in io_uring_del_tctx_node()
166 WARN_ON_ONCE(current != node->task); in io_uring_del_tctx_node()
167 WARN_ON_ONCE(list_empty(&node->ctx_node)); in io_uring_del_tctx_node()
169 mutex_lock(&node->ctx->uring_lock); in io_uring_del_tctx_node()
170 list_del(&node->ctx_node); in io_uring_del_tctx_node()
171 mutex_unlock(&node->ctx->uring_lock); in io_uring_del_tctx_node()
173 if (tctx->last == node->ctx) in io_uring_del_tctx_node()
174 tctx->last = NULL; in io_uring_del_tctx_node()
180 struct io_wq *wq = tctx->io_wq; in io_uring_clean_tctx()
184 xa_for_each(&tctx->xa, index, node) { in io_uring_clean_tctx()
194 tctx->io_wq = NULL; in io_uring_clean_tctx()
200 struct io_uring_task *tctx = current->io_uring; in io_uring_unreg_ringfd()
204 if (tctx->registered_rings[i]) { in io_uring_unreg_ringfd()
205 fput(tctx->registered_rings[i]); in io_uring_unreg_ringfd()
206 tctx->registered_rings[i] = NULL; in io_uring_unreg_ringfd()
214 int offset; in io_ring_add_registered_file() local
215 for (offset = start; offset < end; offset++) { in io_ring_add_registered_file()
216 offset = array_index_nospec(offset, IO_RINGFD_REG_MAX); in io_ring_add_registered_file()
217 if (tctx->registered_rings[offset]) in io_ring_add_registered_file()
220 tctx->registered_rings[offset] = file; in io_ring_add_registered_file()
221 return offset; in io_ring_add_registered_file()
223 return -EBUSY; in io_ring_add_registered_file()
230 int offset; in io_ring_add_registered_fd() local
234 return -EBADF; in io_ring_add_registered_fd()
237 return -EOPNOTSUPP; in io_ring_add_registered_fd()
239 offset = io_ring_add_registered_file(tctx, file, start, end); in io_ring_add_registered_fd()
240 if (offset < 0) in io_ring_add_registered_fd()
242 return offset; in io_ring_add_registered_fd()
248 * with ->data set to the ring_fd, and ->offset given for the desired
249 * index. If no index is desired, application may set ->offset == -1U
257 struct io_uring_rsrc_update reg; in io_ringfd_register() local
262 return -EINVAL; in io_ringfd_register()
264 mutex_unlock(&ctx->uring_lock); in io_ringfd_register()
266 mutex_lock(&ctx->uring_lock); in io_ringfd_register()
270 tctx = current->io_uring; in io_ringfd_register()
274 if (copy_from_user(&reg, &arg[i], sizeof(reg))) { in io_ringfd_register()
275 ret = -EFAULT; in io_ringfd_register()
279 if (reg.resv) { in io_ringfd_register()
280 ret = -EINVAL; in io_ringfd_register()
284 if (reg.offset == -1U) { in io_ringfd_register()
288 if (reg.offset >= IO_RINGFD_REG_MAX) { in io_ringfd_register()
289 ret = -EINVAL; in io_ringfd_register()
292 start = reg.offset; in io_ringfd_register()
296 ret = io_ring_add_registered_fd(tctx, reg.data, start, end); in io_ringfd_register()
300 reg.offset = ret; in io_ringfd_register()
301 if (copy_to_user(&arg[i], &reg, sizeof(reg))) { in io_ringfd_register()
302 fput(tctx->registered_rings[reg.offset]); in io_ringfd_register()
303 tctx->registered_rings[reg.offset] = NULL; in io_ringfd_register()
304 ret = -EFAULT; in io_ringfd_register()
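The comment above io_ringfd_register() spells out the userspace side of this interface: each struct io_uring_rsrc_update entry carries the ring fd in ->data and either a fixed slot or -1U in ->offset, and the kernel writes the chosen index back into ->offset. A minimal userspace sketch of that contract, assuming headers and a kernel new enough to provide IORING_REGISTER_RING_FDS and with error handling trimmed, might look like this (register_ring_fd() is a hypothetical helper name, not part of any library):

#include <errno.h>
#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static int register_ring_fd(int ring_fd)
{
	struct io_uring_rsrc_update upd = {
		.offset = -1U,			/* no index desired: let the kernel pick a free slot */
		.data	= (__u64)ring_fd,	/* the ring fd to register */
	};
	long ret;

	/* io_ringfd_register() returns the number of entries processed */
	ret = syscall(__NR_io_uring_register, ring_fd,
		      IORING_REGISTER_RING_FDS, &upd, 1);
	if (ret != 1)
		return ret < 0 ? -errno : -1;
	/* the kernel copied the allocated index back into ->offset */
	return (int)upd.offset;
}

liburing wraps the same opcode as io_uring_register_ring_fd(); the returned index can then be passed to io_uring_enter() together with IORING_ENTER_REGISTERED_RING in place of the real ring fd, avoiding the per-call fdget/fdput.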
316 struct io_uring_task *tctx = current->io_uring; in io_ringfd_unregister()
317 struct io_uring_rsrc_update reg; in io_ringfd_unregister() local
321 return -EINVAL; in io_ringfd_unregister()
326 if (copy_from_user(&reg, &arg[i], sizeof(reg))) { in io_ringfd_unregister()
327 ret = -EFAULT; in io_ringfd_unregister()
330 if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) { in io_ringfd_unregister()
331 ret = -EINVAL; in io_ringfd_unregister()
335 reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX); in io_ringfd_unregister()
336 if (tctx->registered_rings[reg.offset]) { in io_ringfd_unregister()
337 fput(tctx->registered_rings[reg.offset]); in io_ringfd_unregister()
338 tctx->registered_rings[reg.offset] = NULL; in io_ringfd_unregister()