// SPDX-License-Identifier: GPL-2.0
/*
 * Code related to the io_uring_register() syscall
 *
 * Copyright (C) 2023 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/refcount.h>
#include <linux/bits.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/compat.h>
#include <linux/io_uring.h>
#include <linux/io_uring_types.h>

#include "io_uring.h"
#include "opdef.h"
#include "tctx.h"
#include "rsrc.h"
#include "sqpoll.h"
#include "register.h"
#include "cancel.h"
#include "kbuf.h"
#include "napi.h"

#define IORING_MAX_RESTRICTIONS (IORING_RESTRICTION_LAST + \
                                 IORING_REGISTER_LAST + IORING_OP_LAST)

static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
                               unsigned int eventfd_async)
{
        struct io_ev_fd *ev_fd;
        __s32 __user *fds = arg;
        int fd;

        ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
                                          lockdep_is_held(&ctx->uring_lock));
        if (ev_fd)
                return -EBUSY;

        if (copy_from_user(&fd, fds, sizeof(*fds)))
                return -EFAULT;

        ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
        if (!ev_fd)
                return -ENOMEM;

        ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
        if (IS_ERR(ev_fd->cq_ev_fd)) {
                int ret = PTR_ERR(ev_fd->cq_ev_fd);

                kfree(ev_fd);
                return ret;
        }

        spin_lock(&ctx->completion_lock);
        ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
        spin_unlock(&ctx->completion_lock);

        ev_fd->eventfd_async = eventfd_async;
        ctx->has_evfd = true;
        rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
        atomic_set(&ev_fd->refs, 1);
        atomic_set(&ev_fd->ops, 0);
        return 0;
}

int io_eventfd_unregister(struct io_ring_ctx *ctx)
{
        struct io_ev_fd *ev_fd;

        ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
                                          lockdep_is_held(&ctx->uring_lock));
        if (ev_fd) {
                ctx->has_evfd = false;
                rcu_assign_pointer(ctx->io_ev_fd, NULL);
                if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_FREE_BIT), &ev_fd->ops))
                        call_rcu(&ev_fd->rcu, io_eventfd_ops);
                return 0;
        }

        return -ENXIO;
}

static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
                           unsigned nr_args)
{
        struct io_uring_probe *p;
        size_t size;
        int i, ret;

        size = struct_size(p, ops, nr_args);
        if (size == SIZE_MAX)
                return -EOVERFLOW;
        p = kzalloc(size, GFP_KERNEL);
        if (!p)
                return -ENOMEM;

        ret = -EFAULT;
        if (copy_from_user(p, arg, size))
                goto out;
        ret = -EINVAL;
        if (memchr_inv(p, 0, size))
                goto out;

        p->last_op = IORING_OP_LAST - 1;
        if (nr_args > IORING_OP_LAST)
                nr_args = IORING_OP_LAST;

        for (i = 0; i < nr_args; i++) {
                p->ops[i].op = i;
                if (!io_issue_defs[i].not_supported)
                        p->ops[i].flags = IO_URING_OP_SUPPORTED;
        }
        p->ops_len = i;

        ret = 0;
        if (copy_to_user(arg, p, size))
                ret = -EFAULT;
out:
        kfree(p);
        return ret;
}

int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
{
        const struct cred *creds;

        creds = xa_erase(&ctx->personalities, id);
        if (creds) {
                put_cred(creds);
                return 0;
        }

        return -EINVAL;
}

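/*
 * Register the calling task's credentials as a personality. The returned id
 * can later be placed in sqe->personality so that individual requests are
 * issued with these credentials instead of those of the submitting task.
 */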
static int io_register_personality(struct io_ring_ctx *ctx)
{
        const struct cred *creds;
        u32 id;
        int ret;

        creds = get_current_cred();

        ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
                              XA_LIMIT(0, USHRT_MAX), &ctx->pers_next,
                              GFP_KERNEL);
        if (ret < 0) {
                put_cred(creds);
                return ret;
        }
        return id;
}

static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
                                           void __user *arg, unsigned int nr_args)
{
        struct io_uring_restriction *res;
        size_t size;
        int i, ret;

        /* Restrictions allowed only if rings started disabled */
        if (!(ctx->flags & IORING_SETUP_R_DISABLED))
                return -EBADFD;

        /* We allow only a single restrictions registration */
        if (ctx->restrictions.registered)
                return -EBUSY;

        if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
                return -EINVAL;

        size = array_size(nr_args, sizeof(*res));
        if (size == SIZE_MAX)
                return -EOVERFLOW;

        res = memdup_user(arg, size);
        if (IS_ERR(res))
                return PTR_ERR(res);

        ret = 0;

        for (i = 0; i < nr_args; i++) {
                switch (res[i].opcode) {
                case IORING_RESTRICTION_REGISTER_OP:
                        if (res[i].register_op >= IORING_REGISTER_LAST) {
                                ret = -EINVAL;
                                goto out;
                        }

                        __set_bit(res[i].register_op,
                                  ctx->restrictions.register_op);
                        break;
                case IORING_RESTRICTION_SQE_OP:
                        if (res[i].sqe_op >= IORING_OP_LAST) {
                                ret = -EINVAL;
                                goto out;
                        }

                        __set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
                        break;
                case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
                        ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
                        break;
                case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
                        ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
                        break;
                default:
                        ret = -EINVAL;
                        goto out;
                }
        }

out:
        /* Reset all restrictions if an error happened */
        if (ret != 0)
                memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
        else
                ctx->restrictions.registered = true;

        kfree(res);
        return ret;
}

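/*
 * Enable a ring that was created with IORING_SETUP_R_DISABLED. Any
 * restrictions registered while the ring was disabled take effect from this
 * point on, and a sleeping SQPOLL thread is woken up.
 */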
static int io_register_enable_rings(struct io_ring_ctx *ctx)
{
        if (!(ctx->flags & IORING_SETUP_R_DISABLED))
                return -EBADFD;

        if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task) {
                WRITE_ONCE(ctx->submitter_task, get_task_struct(current));
                /*
                 * Lazy activation attempts would fail if it was polled before
                 * submitter_task is set.
                 */
                if (wq_has_sleeper(&ctx->poll_wq))
                        io_activate_pollwq(ctx);
        }

        if (ctx->restrictions.registered)
                ctx->restricted = 1;

        ctx->flags &= ~IORING_SETUP_R_DISABLED;
        if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
                wake_up(&ctx->sq_data->wait);
        return 0;
}

static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
                                         cpumask_var_t new_mask)
{
        int ret;

        if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
                ret = io_wq_cpu_affinity(current->io_uring, new_mask);
        } else {
                mutex_unlock(&ctx->uring_lock);
                ret = io_sqpoll_wq_cpu_affinity(ctx, new_mask);
                mutex_lock(&ctx->uring_lock);
        }

        return ret;
}

static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
                                       void __user *arg, unsigned len)
{
        cpumask_var_t new_mask;
        int ret;

        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
                return -ENOMEM;

        cpumask_clear(new_mask);
        if (len > cpumask_size())
                len = cpumask_size();

#ifdef CONFIG_COMPAT
        if (in_compat_syscall())
                ret = compat_get_bitmap(cpumask_bits(new_mask),
                                        (const compat_ulong_t __user *)arg,
                                        len * 8 /* CHAR_BIT */);
        else
#endif
                ret = copy_from_user(new_mask, arg, len);

        if (ret) {
                free_cpumask_var(new_mask);
                return -EFAULT;
        }

        ret = __io_register_iowq_aff(ctx, new_mask);
        free_cpumask_var(new_mask);
        return ret;
}

static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
{
        return __io_register_iowq_aff(ctx, NULL);
}

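/*
 * Cap the number of bounded/unbounded io-wq workers. The limits are stored in
 * the ctx, applied to the issuing task's io-wq (or the SQPOLL thread's io-wq),
 * and then propagated to every other task registered with this ring.
 */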
static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
                                               void __user *arg)
        __must_hold(&ctx->uring_lock)
{
        struct io_tctx_node *node;
        struct io_uring_task *tctx = NULL;
        struct io_sq_data *sqd = NULL;
        __u32 new_count[2];
        int i, ret;

        if (copy_from_user(new_count, arg, sizeof(new_count)))
                return -EFAULT;
        for (i = 0; i < ARRAY_SIZE(new_count); i++)
                if (new_count[i] > INT_MAX)
                        return -EINVAL;

        if (ctx->flags & IORING_SETUP_SQPOLL) {
                sqd = ctx->sq_data;
                if (sqd) {
                        /*
                         * Observe the correct sqd->lock -> ctx->uring_lock
                         * ordering. Fine to drop uring_lock here, we hold
                         * a ref to the ctx.
                         */
                        refcount_inc(&sqd->refs);
                        mutex_unlock(&ctx->uring_lock);
                        mutex_lock(&sqd->lock);
                        mutex_lock(&ctx->uring_lock);
                        if (sqd->thread)
                                tctx = sqd->thread->io_uring;
                }
        } else {
                tctx = current->io_uring;
        }

        BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));

        for (i = 0; i < ARRAY_SIZE(new_count); i++)
                if (new_count[i])
                        ctx->iowq_limits[i] = new_count[i];
        ctx->iowq_limits_set = true;

        if (tctx && tctx->io_wq) {
                ret = io_wq_max_workers(tctx->io_wq, new_count);
                if (ret)
                        goto err;
        } else {
                memset(new_count, 0, sizeof(new_count));
        }

        if (sqd) {
                mutex_unlock(&sqd->lock);
                io_put_sq_data(sqd);
        }

        if (copy_to_user(arg, new_count, sizeof(new_count)))
                return -EFAULT;

        /* that's it for SQPOLL, only the SQPOLL task creates requests */
        if (sqd)
                return 0;

        /* now propagate the restriction to all registered users */
        list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
                tctx = node->task->io_uring;
                if (WARN_ON_ONCE(!tctx->io_wq))
                        continue;

                for (i = 0; i < ARRAY_SIZE(new_count); i++)
                        new_count[i] = ctx->iowq_limits[i];
                /* ignore errors, it always returns zero anyway */
                (void)io_wq_max_workers(tctx->io_wq, new_count);
        }
        return 0;
err:
        if (sqd) {
                mutex_unlock(&sqd->lock);
                io_put_sq_data(sqd);
        }
        return ret;
}

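/*
 * Dispatch a single io_uring_register() opcode with ctx->uring_lock held. If
 * the ring is restricted, only opcodes explicitly allowed through
 * IORING_REGISTER_RESTRICTIONS are accepted.
 */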
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
                               void __user *arg, unsigned nr_args)
        __releases(ctx->uring_lock)
        __acquires(ctx->uring_lock)
{
        int ret;

        /*
         * We don't quiesce the refs for register anymore and so it can't be
         * dying as we're holding a file ref here.
         */
        if (WARN_ON_ONCE(percpu_ref_is_dying(&ctx->refs)))
                return -ENXIO;

        if (ctx->submitter_task && ctx->submitter_task != current)
                return -EEXIST;

        if (ctx->restricted) {
                opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
                if (!test_bit(opcode, ctx->restrictions.register_op))
                        return -EACCES;
        }

        switch (opcode) {
        case IORING_REGISTER_BUFFERS:
                ret = -EFAULT;
                if (!arg)
                        break;
                ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
                break;
        case IORING_UNREGISTER_BUFFERS:
                ret = -EINVAL;
                if (arg || nr_args)
                        break;
                ret = io_sqe_buffers_unregister(ctx);
                break;
        case IORING_REGISTER_FILES:
                ret = -EFAULT;
                if (!arg)
                        break;
                ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
                break;
        case IORING_UNREGISTER_FILES:
                ret = -EINVAL;
                if (arg || nr_args)
                        break;
                ret = io_sqe_files_unregister(ctx);
                break;
        case IORING_REGISTER_FILES_UPDATE:
                ret = io_register_files_update(ctx, arg, nr_args);
                break;
        case IORING_REGISTER_EVENTFD:
                ret = -EINVAL;
                if (nr_args != 1)
                        break;
                ret = io_eventfd_register(ctx, arg, 0);
                break;
        case IORING_REGISTER_EVENTFD_ASYNC:
                ret = -EINVAL;
                if (nr_args != 1)
                        break;
                ret = io_eventfd_register(ctx, arg, 1);
                break;
        case IORING_UNREGISTER_EVENTFD:
                ret = -EINVAL;
                if (arg || nr_args)
                        break;
                ret = io_eventfd_unregister(ctx);
                break;
        case IORING_REGISTER_PROBE:
                ret = -EINVAL;
                if (!arg || nr_args > 256)
                        break;
                ret = io_probe(ctx, arg, nr_args);
                break;
        case IORING_REGISTER_PERSONALITY:
                ret = -EINVAL;
                if (arg || nr_args)
                        break;
                ret = io_register_personality(ctx);
                break;
        case IORING_UNREGISTER_PERSONALITY:
                ret = -EINVAL;
                if (arg)
                        break;
                ret = io_unregister_personality(ctx, nr_args);
                break;
        case IORING_REGISTER_ENABLE_RINGS:
                ret = -EINVAL;
                if (arg || nr_args)
                        break;
                ret = io_register_enable_rings(ctx);
                break;
        case IORING_REGISTER_RESTRICTIONS:
                ret = io_register_restrictions(ctx, arg, nr_args);
                break;
        case IORING_REGISTER_FILES2:
                ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
                break;
        case IORING_REGISTER_FILES_UPDATE2:
                ret = io_register_rsrc_update(ctx, arg, nr_args,
                                              IORING_RSRC_FILE);
                break;
        case IORING_REGISTER_BUFFERS2:
                ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
                break;
        case IORING_REGISTER_BUFFERS_UPDATE:
                ret = io_register_rsrc_update(ctx, arg, nr_args,
                                              IORING_RSRC_BUFFER);
                break;
        case IORING_REGISTER_IOWQ_AFF:
                ret = -EINVAL;
                if (!arg || !nr_args)
                        break;
                ret = io_register_iowq_aff(ctx, arg, nr_args);
                break;
        case IORING_UNREGISTER_IOWQ_AFF:
                ret = -EINVAL;
                if (arg || nr_args)
                        break;
                ret = io_unregister_iowq_aff(ctx);
                break;
        case IORING_REGISTER_IOWQ_MAX_WORKERS:
                ret = -EINVAL;
                if (!arg || nr_args != 2)
                        break;
                ret = io_register_iowq_max_workers(ctx, arg);
                break;
        case IORING_REGISTER_RING_FDS:
                ret = io_ringfd_register(ctx, arg, nr_args);
                break;
        case IORING_UNREGISTER_RING_FDS:
                ret = io_ringfd_unregister(ctx, arg, nr_args);
                break;
        case IORING_REGISTER_PBUF_RING:
                ret = -EINVAL;
                if (!arg || nr_args != 1)
                        break;
                ret = io_register_pbuf_ring(ctx, arg);
                break;
        case IORING_UNREGISTER_PBUF_RING:
                ret = -EINVAL;
                if (!arg || nr_args != 1)
                        break;
                ret = io_unregister_pbuf_ring(ctx, arg);
                break;
        case IORING_REGISTER_SYNC_CANCEL:
                ret = -EINVAL;
                if (!arg || nr_args != 1)
                        break;
                ret = io_sync_cancel(ctx, arg);
                break;
        case IORING_REGISTER_FILE_ALLOC_RANGE:
                ret = -EINVAL;
                if (!arg || nr_args)
                        break;
                ret = io_register_file_alloc_range(ctx, arg);
                break;
        case IORING_REGISTER_PBUF_STATUS:
                ret = -EINVAL;
                if (!arg || nr_args != 1)
                        break;
                ret = io_register_pbuf_status(ctx, arg);
                break;
        case IORING_REGISTER_NAPI:
                ret = -EINVAL;
                if (!arg || nr_args != 1)
                        break;
                ret = io_register_napi(ctx, arg);
                break;
        case IORING_UNREGISTER_NAPI:
                ret = -EINVAL;
                if (nr_args != 1)
                        break;
                ret = io_unregister_napi(ctx, arg);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

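/*
 * Syscall entry point. If IORING_REGISTER_USE_REGISTERED_RING is set in the
 * opcode, 'fd' is an index into the task's registered ring array rather than
 * a normal file descriptor, avoiding an fget()/fput() pair per call.
 */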
SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
                void __user *, arg, unsigned int, nr_args)
{
        struct io_ring_ctx *ctx;
        long ret = -EBADF;
        struct file *file;
        bool use_registered_ring;

        use_registered_ring = !!(opcode & IORING_REGISTER_USE_REGISTERED_RING);
        opcode &= ~IORING_REGISTER_USE_REGISTERED_RING;

        if (opcode >= IORING_REGISTER_LAST)
                return -EINVAL;

        if (use_registered_ring) {
                /*
                 * Ring fd has been registered via IORING_REGISTER_RING_FDS, we
                 * need only dereference our task private array to find it.
                 */
                struct io_uring_task *tctx = current->io_uring;

                if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
                        return -EINVAL;
                fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
                file = tctx->registered_rings[fd];
                if (unlikely(!file))
                        return -EBADF;
        } else {
                file = fget(fd);
                if (unlikely(!file))
                        return -EBADF;
                ret = -EOPNOTSUPP;
                if (!io_is_uring_fops(file))
                        goto out_fput;
        }

        ctx = file->private_data;

        mutex_lock(&ctx->uring_lock);
        ret = __io_uring_register(ctx, opcode, arg, nr_args);
        mutex_unlock(&ctx->uring_lock);
        trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
out_fput:
        if (!use_registered_ring)
                fput(file);
        return ret;
}
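
/*
 * Userspace usage sketch (illustration only, not part of this file):
 * registering an eventfd for CQE notifications via the raw syscall, assuming
 * 'ring_fd' refers to an io_uring instance and 'efd' to an eventfd:
 *
 *      int notify_fd = efd;
 *
 *      syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_EVENTFD,
 *              &notify_fd, 1);
 *
 * liburing wraps this opcode as io_uring_register_eventfd().
 */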