// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"
#include "bpf_filter.h"

/*
 * Create the io-wq instance used to offload work for @ctx, run as @task.
 *
 * The work hash (used to serialize hashed work items) is shared per-ring:
 * it is lazily allocated and published under ->uring_lock so concurrent
 * callers all end up using the same instance. Returns the new io_wq or
 * an ERR_PTR() on allocation failure.
 */
static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
					struct task_struct *task)
{
	struct io_wq_hash *hash;
	struct io_wq_data data;
	unsigned int concurrency;

	mutex_lock(&ctx->uring_lock);
	hash = ctx->hash_map;
	if (!hash) {
		hash = kzalloc_obj(*hash);
		if (!hash) {
			mutex_unlock(&ctx->uring_lock);
			return ERR_PTR(-ENOMEM);
		}
		refcount_set(&hash->refs, 1);
		init_waitqueue_head(&hash->wait);
		/* publish under ->uring_lock; later callers reuse it */
		ctx->hash_map = hash;
	}
	mutex_unlock(&ctx->uring_lock);

	data.hash = hash;
	data.task = task;

	/* Do QD, or 4 * CPUS, whatever is smallest */
	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

	return io_wq_create(concurrency, &data);
}

/*
 * Final per-task io_uring teardown. Frees the task's io_uring_task
 * context (if any) and any attached restriction/BPF-filter state.
 * By this point all ring registrations must already be gone; the
 * WARN_ON_ONCE()s below assert that.
 */
void __io_uring_free(struct task_struct *tsk)
{
	struct io_uring_task *tctx = tsk->io_uring;
	struct io_tctx_node *node;
	unsigned long index;

	/*
	 * Fault injection forcing allocation errors in the xa_store() path
	 * can lead to xa_empty() returning false, even though no actual
	 * node is stored in the xarray. Until that gets sorted out, attempt
	 * an iteration here and warn if any entries are found.
	 */
	if (tctx) {
		xa_for_each(&tctx->xa, index, node) {
			WARN_ON_ONCE(1);
			break;
		}
		/* io_uring_clean_tctx() should have torn these down already */
		WARN_ON_ONCE(tctx->io_wq);
		WARN_ON_ONCE(tctx->cached_refs);

		percpu_counter_destroy(&tctx->inflight);
		kfree(tctx);
		tsk->io_uring = NULL;
	}
	if (tsk->io_uring_restrict) {
		/* drop filter references before freeing the restriction block */
		io_put_bpf_filters(tsk->io_uring_restrict);
		kfree(tsk->io_uring_restrict);
		tsk->io_uring_restrict = NULL;
	}
}

/*
 * Allocate and initialize a per-task io_uring context for @task, including
 * its backing io-wq. On success returns the new context (the caller is
 * responsible for assigning it to task->io_uring); on failure returns an
 * ERR_PTR() with everything allocated here unwound.
 */
__cold struct io_uring_task *io_uring_alloc_task_context(struct task_struct *task,
							 struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx;
	int ret;

	tctx = kzalloc_obj(*tctx);
	if (unlikely(!tctx))
		return ERR_PTR(-ENOMEM);

	ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
	if (unlikely(ret)) {
		kfree(tctx);
		return ERR_PTR(ret);
	}

	tctx->io_wq = io_init_wq_offload(ctx, task);
	if (IS_ERR(tctx->io_wq)) {
		ret = PTR_ERR(tctx->io_wq);
		percpu_counter_destroy(&tctx->inflight);
		kfree(tctx);
		return ERR_PTR(ret);
	}

	tctx->task = task;
	xa_init(&tctx->xa);
	init_waitqueue_head(&tctx->wait);
	atomic_set(&tctx->in_cancel, 0);
	atomic_set(&tctx->inflight_tracked, 0);
	init_llist_head(&tctx->task_list);
	init_task_work(&tctx->task_work, tctx_task_work);
	return tctx;
}

/*
 * Record the ctx <-> task association: store a node in the tctx xarray
 * (keyed by the ctx pointer) and link it on the ctx's tctx_list. A no-op
 * if the mapping already exists. Returns 0 or a negative error.
 */
static int io_tctx_install_node(struct io_ring_ctx *ctx,
				struct io_uring_task *tctx)
{
	struct io_tctx_node *node;
	int ret;

	if (xa_load(&tctx->xa, (unsigned long)ctx))
		return 0;

	node = kmalloc_obj(*node);
	if (!node)
		return -ENOMEM;
	node->ctx = ctx;
	node->task = current;

	ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
			      node, GFP_KERNEL));
	if (ret) {
		kfree(node);
		return ret;
	}

	mutex_lock(&ctx->tctx_lock);
	list_add(&node->ctx_node, &ctx->tctx_list);
	mutex_unlock(&ctx->tctx_lock);
	return 0;
}

/*
 * Ensure the current task has an io_uring context and that @ctx is mapped
 * into it. Allocates the per-task context (and applies the ring's io-wq
 * worker limits, if set) on first use. Only publishes the new context to
 * current->io_uring once the node install succeeded; on failure a freshly
 * allocated context is fully unwound. Returns 0 or a negative error.
 */
int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;
	int ret;

	if (unlikely(!tctx)) {
		tctx = io_uring_alloc_task_context(current, ctx);
		if (IS_ERR(tctx))
			return PTR_ERR(tctx);

		if (ctx->int_flags & IO_RING_F_IOWQ_LIMITS_SET) {
			unsigned int limits[2] = { ctx->iowq_limits[0],
						   ctx->iowq_limits[1], };

			ret = io_wq_max_workers(tctx->io_wq, limits);
			if (ret)
				goto err_free;
		}
	}

	/*
	 * Re-activate io-wq keepalive on any new io_uring usage. The wq may have
	 * been marked for idle-exit when the task temporarily had no active
	 * io_uring instances.
	 */
	if (tctx->io_wq)
		io_wq_set_exit_on_idle(tctx->io_wq, false);

	ret = io_tctx_install_node(ctx, tctx);
	if (!ret) {
		current->io_uring = tctx;
		return 0;
	}
	/*
	 * Only free the context if it was allocated above (i.e. it is not
	 * yet visible via current->io_uring).
	 */
	if (!current->io_uring) {
err_free:
		io_wq_put_and_exit(tctx->io_wq);
		percpu_counter_destroy(&tctx->inflight);
		kfree(tctx);
	}
	return ret;
}

/*
 * Submission-path variant of the above: additionally enforces
 * IORING_SETUP_SINGLE_ISSUER and caches @ctx as the task's most
 * recently used ring. Returns 0, -EEXIST for a non-issuer task on a
 * single-issuer ring, or a negative error from node installation.
 */
int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx)
{
	int ret;

	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
	    && ctx->submitter_task != current)
		return -EEXIST;

	ret = __io_uring_add_tctx_node(ctx);
	if (ret)
		return ret;

	current->io_uring->last = ctx;
	return 0;
}

/*
 * Remove this io_uring_file -> task mapping.
 */
__cold void io_uring_del_tctx_node(unsigned long index)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;

	if (!tctx)
		return;
	node = xa_erase(&tctx->xa, index);
	if (!node)
		return;

	/* nodes are only ever removed by the task that installed them */
	WARN_ON_ONCE(current != node->task);
	WARN_ON_ONCE(list_empty(&node->ctx_node));

	mutex_lock(&node->ctx->tctx_lock);
	list_del(&node->ctx_node);
	mutex_unlock(&node->ctx->tctx_lock);

	if (tctx->last == node->ctx)
		tctx->last = NULL;
	kfree(node);

	/*
	 * No rings left mapped for this task: allow the io-wq workers to
	 * exit when idle rather than keeping them alive.
	 */
	if (xa_empty(&tctx->xa) && tctx->io_wq)
		io_wq_set_exit_on_idle(tctx->io_wq, true);
}

/*
 * Drop every ctx mapping held by @tctx and shut down its io-wq. Used on
 * task-level io_uring teardown/cancelation paths.
 */
__cold void io_uring_clean_tctx(struct io_uring_task *tctx)
{
	struct io_wq *wq = tctx->io_wq;
	struct io_tctx_node *node;
	unsigned long index;

	xa_for_each(&tctx->xa, index, node) {
		io_uring_del_tctx_node(index);
		cond_resched();
	}
	if (wq) {
		/*
		 * Must be after io_uring_del_tctx_node() (removes nodes under
		 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
		 */
		io_wq_put_and_exit(wq);
		tctx->io_wq = NULL;
	}
}

/*
 * Drop all of the current task's registered ring fds. Assumes
 * current->io_uring is non-NULL (callers guarantee this).
 */
void io_uring_unreg_ringfd(void)
{
	struct io_uring_task *tctx = current->io_uring;
	int i;

	for (i = 0; i < IO_RINGFD_REG_MAX; i++) {
		if (tctx->registered_rings[i]) {
			fput(tctx->registered_rings[i]);
			tctx->registered_rings[i] = NULL;
		}
	}
}

/*
 * Place @file in the first free registered-rings slot in [start, end).
 * Returns the slot index used, or -EBUSY if the range is full. The file
 * reference is consumed on success; @end must not exceed
 * IO_RINGFD_REG_MAX (indices are nospec-clamped, not bounds-checked).
 */
int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end)
{
	int offset, idx;
	for (offset = start; offset < end; offset++) {
		idx = array_index_nospec(offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[idx])
			continue;

		tctx->registered_rings[idx] = file;
		return idx;
	}
	return -EBUSY;
}

/*
 * Resolve @fd, verify it is an io_uring file, and register it in
 * [start, end). Returns the slot index, -EBADF for a bad fd,
 * -EOPNOTSUPP for a non-io_uring file, or -EBUSY if no slot is free.
 * The fget() reference is kept on success and dropped on any failure.
 */
static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
				     int start, int end)
{
	struct file *file;
	int offset;

	file = fget(fd);
	if (!file) {
		return -EBADF;
	} else if (!io_is_uring_fops(file)) {
		fput(file);
		return -EOPNOTSUPP;
	}
	offset = io_ring_add_registered_file(tctx, file, start, end);
	if (offset < 0)
		fput(file);
	return offset;
}

/*
 * Register a ring fd to avoid fdget/fdput for each io_uring_enter()
 * invocation. User passes in an array of struct io_uring_rsrc_update
 * with ->data set to the ring_fd, and ->offset given for the desired
 * index. If no index is desired, application may set ->offset == -1U
 * and we'll find an available index. Returns number of entries
 * successfully processed, or < 0 on error if none were processed.
 */
int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
		       unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_rsrc_update reg;
	struct io_uring_task *tctx;
	int ret, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;

	/*
	 * Drop ->uring_lock around node installation; it takes other locks.
	 * NOTE(review): presumably required for lock ordering - confirm.
	 */
	mutex_unlock(&ctx->uring_lock);
	ret = __io_uring_add_tctx_node(ctx);
	mutex_lock(&ctx->uring_lock);
	if (ret)
		return ret;

	tctx = current->io_uring;
	for (i = 0; i < nr_args; i++) {
		int start, end;

		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}

		if (reg.resv) {
			ret = -EINVAL;
			break;
		}

		if (reg.offset == -1U) {
			/* caller wants any free slot */
			start = 0;
			end = IO_RINGFD_REG_MAX;
		} else {
			if (reg.offset >= IO_RINGFD_REG_MAX) {
				ret = -EINVAL;
				break;
			}
			start = reg.offset;
			end = start + 1;
		}

		ret = io_ring_add_registered_fd(tctx, reg.data, start, end);
		if (ret < 0)
			break;

		/* report the slot actually used back to userspace */
		reg.offset = ret;
		if (copy_to_user(&arg[i], &reg, sizeof(reg))) {
			/* roll back this entry; earlier ones stay registered */
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
			ret = -EFAULT;
			break;
		}
	}

	/* partial success: report entries processed, else the error */
	return i ? i : ret;
}

/*
 * Unregister ring fds previously registered via io_ringfd_register().
 * Each entry must have ->data clear and a valid ->offset; empty slots
 * are silently skipped. Returns the number of entries processed, or a
 * negative error if none were.
 */
int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
			 unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_task *tctx = current->io_uring;
	struct io_uring_rsrc_update reg;
	int ret = 0, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;
	if (!tctx)
		return 0;

	for (i = 0; i < nr_args; i++) {
		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}
		if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) {
			ret = -EINVAL;
			break;
		}

		reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[reg.offset]) {
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
		}
	}

	return i ? i : ret;
}

/*
 * Fork-time hook: give the child its own copy of the parent's io_uring
 * restriction state. NOTE(review): assumes tsk->io_uring_restrict is
 * non-NULL when this is called - confirm against the fork path.
 * Returns 0 or -ENOMEM.
 */
int __io_uring_fork(struct task_struct *tsk)
{
	struct io_restriction *res, *src = tsk->io_uring_restrict;

	/* Don't leave it dangling on error */
	tsk->io_uring_restrict = NULL;

	res = kzalloc_obj(*res, GFP_KERNEL_ACCOUNT);
	if (!res)
		return -ENOMEM;

	tsk->io_uring_restrict = res;
	io_restriction_clone(res, src);
	return 0;
}