// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "alloc_cache.h"
#include "openclose.h"
#include "rsrc.h"
#include "memmap.h"

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage);

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 20)
#define IORING_MAX_REG_BUFFERS	(1U << 14)

static const struct io_mapped_ubuf dummy_ubuf = {
	/* set invalid range, so io_import_fixed() fails meeting it */
	.ubuf = -1UL,
	.ubuf_end = 0,
};

int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	if (!nr_pages)
		return 0;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	cur_pages = atomic_long_read(&user->locked_vm);
	do {
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
					  &cur_pages, new_pages));
	return 0;
}

static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}

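/*
 * Validate a single iovec passed in for buffer registration: a NULL base
 * is only acceptable for a sparse (zero length) entry, the length is
 * capped at 1G, and base + length must not wrap the address space.
 */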
static int io_buffer_validate(struct iovec *iov)
{
	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here, we'll -EINVAL later when IO is
	 * submitted if they are wrong.
	 */
	if (!iov->iov_base)
		return iov->iov_len ? -EFAULT : 0;
	if (!iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
		return -EOVERFLOW;

	return 0;
}

static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
{
	struct io_mapped_ubuf *imu = *slot;
	unsigned int i;

	if (imu != &dummy_ubuf) {
		for (i = 0; i < imu->nr_bvecs; i++)
			unpin_user_page(imu->bvec[i].bv_page);
		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu);
	}
	*slot = NULL;
}

static void io_rsrc_put_work(struct io_rsrc_node *node)
{
	struct io_rsrc_put *prsrc = &node->item;

	if (prsrc->tag)
		io_post_aux_cqe(node->ctx, prsrc->tag, 0, 0);

	switch (node->type) {
	case IORING_RSRC_FILE:
		fput(prsrc->file);
		break;
	case IORING_RSRC_BUFFER:
		io_rsrc_buf_put(node->ctx, prsrc);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	if (!io_alloc_cache_put(&ctx->rsrc_node_cache, node))
		kfree(node);
}

void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
	__must_hold(&node->ctx->uring_lock)
{
	struct io_ring_ctx *ctx = node->ctx;

	while (!list_empty(&ctx->rsrc_ref_list)) {
		node = list_first_entry(&ctx->rsrc_ref_list,
					struct io_rsrc_node, node);
		/* recycle ref nodes in order */
		if (node->refs)
			break;
		list_del(&node->node);

		if (likely(!node->empty))
			io_rsrc_put_work(node);
		io_rsrc_node_destroy(ctx, node);
	}
	if (list_empty(&ctx->rsrc_ref_list) && unlikely(ctx->rsrc_quiesce))
		wake_up_all(&ctx->rsrc_quiesce_wq);
}

struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *ref_node;

	ref_node = io_alloc_cache_get(&ctx->rsrc_node_cache);
	if (!ref_node) {
		ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
		if (!ref_node)
			return NULL;
	}

	ref_node->ctx = ctx;
	ref_node->empty = 0;
	ref_node->refs = 1;
	return ref_node;
}

__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
				      struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *backup;
	DEFINE_WAIT(we);
	int ret;

	/* As we may drop ->uring_lock, other task may have started quiesce */
	if (data->quiesce)
		return -ENXIO;

	backup = io_rsrc_node_alloc(ctx);
	if (!backup)
		return -ENOMEM;
	ctx->rsrc_node->empty = true;
	ctx->rsrc_node->type = -1;
	list_add_tail(&ctx->rsrc_node->node, &ctx->rsrc_ref_list);
	io_put_rsrc_node(ctx, ctx->rsrc_node);
	ctx->rsrc_node = backup;

	if (list_empty(&ctx->rsrc_ref_list))
		return 0;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 1);
		smp_mb();
	}

	ctx->rsrc_quiesce++;
	data->quiesce = true;
	do {
		prepare_to_wait(&ctx->rsrc_quiesce_wq, &we, TASK_INTERRUPTIBLE);
		mutex_unlock(&ctx->uring_lock);

		ret = io_run_task_work_sig(ctx);
		if (ret < 0) {
			finish_wait(&ctx->rsrc_quiesce_wq, &we);
			mutex_lock(&ctx->uring_lock);
			if (list_empty(&ctx->rsrc_ref_list))
				ret = 0;
			break;
		}

		schedule();
		mutex_lock(&ctx->uring_lock);
		ret = 0;
	} while (!list_empty(&ctx->rsrc_ref_list));

	finish_wait(&ctx->rsrc_quiesce_wq, &we);
	data->quiesce = false;
	ctx->rsrc_quiesce--;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 0);
		smp_mb();
	}
	return ret;
}

static void io_free_page_table(void **table, size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);

	for (i = 0; i < nr_tables; i++)
		kfree(table[i]);
	kfree(table);
}

static void io_rsrc_data_free(struct io_rsrc_data *data)
{
	size_t size = data->nr * sizeof(data->tags[0][0]);

	if (data->tags)
		io_free_page_table((void **)data->tags, size);
	kfree(data);
}

static __cold void **io_alloc_page_table(size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
	size_t init_size = size;
	void **table;

	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
	if (!table)
		return NULL;

	for (i = 0; i < nr_tables; i++) {
		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);

		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
		if (!table[i]) {
			io_free_page_table(table, init_size);
			return NULL;
		}
		size -= this_size;
	}
	return table;
}

__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, int type,
				     u64 __user *utags,
				     unsigned nr, struct io_rsrc_data **pdata)
{
	struct io_rsrc_data *data;
	int ret = 0;
	unsigned i;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
	if (!data->tags) {
		kfree(data);
		return -ENOMEM;
	}

	data->nr = nr;
	data->ctx = ctx;
	data->rsrc_type = type;
	if (utags) {
		ret = -EFAULT;
		for (i = 0; i < nr; i++) {
			u64 *tag_slot = io_get_tag_slot(data, i);

			if (copy_from_user(tag_slot, &utags[i],
					   sizeof(*tag_slot)))
				goto fail;
		}
	}
	*pdata = data;
	return 0;
fail:
	io_rsrc_data_free(data);
	return ret;
}

static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	__s32 __user *fds = u64_to_user_ptr(up->data);
	struct io_rsrc_data *data = ctx->file_data;
	struct io_fixed_file *file_slot;
	int fd, i, err = 0;
	unsigned int done;

	if (!ctx->file_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_files)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		u64 tag = 0;

		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
			err = -EINVAL;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
		file_slot = io_fixed_file_slot(&ctx->file_table, i);

		if (file_slot->file_ptr) {
			err = io_queue_rsrc_removal(data, i,
						    io_slot_file(file_slot));
			if (err)
				break;
			file_slot->file_ptr = 0;
			io_file_bitmap_clear(&ctx->file_table, i);
		}
		if (fd != -1) {
			struct file *file = fget(fd);

			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered.
			 */
			if (io_is_uring_fops(file)) {
				fput(file);
				err = -EBADF;
				break;
			}
			*io_get_tag_slot(data, i) = tag;
			io_fixed_file_set(file_slot, file);
			io_file_bitmap_set(&ctx->file_table, i);
		}
	}
	return done ? done : err;
}

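/*
 * Replace registered buffers in the given range. Each new iovec is pinned
 * and mapped before the old buffer (if any) is queued for removal, so a
 * failure part way through leaves the slots updated so far intact.
 */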
static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec fast_iov, *iov;
	struct page *last_hpage = NULL;
	struct iovec __user *uvec;
	u64 user_data = up->data;
	__u32 done;
	int i, err;

	if (!ctx->buf_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_bufs)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_mapped_ubuf *imu;
		u64 tag = 0;

		uvec = u64_to_user_ptr(user_data);
		iov = iovec_from_user(uvec, 1, 1, &fast_iov, ctx->compat);
		if (IS_ERR(iov)) {
			err = PTR_ERR(iov);
			break;
		}
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(iov);
		if (err)
			break;
		if (!iov->iov_base && tag) {
			err = -EINVAL;
			break;
		}
		err = io_sqe_buffer_register(ctx, iov, &imu, &last_hpage);
		if (err)
			break;

		i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
		if (ctx->user_bufs[i] != &dummy_ubuf) {
			err = io_queue_rsrc_removal(ctx->buf_data, i,
						    ctx->user_bufs[i]);
			if (unlikely(err)) {
				io_buffer_unmap(ctx, &imu);
				break;
			}
			ctx->user_bufs[i] = (struct io_mapped_ubuf *)&dummy_ubuf;
		}

		ctx->user_bufs[i] = imu;
		*io_get_tag_slot(ctx->buf_data, i) = tag;
		if (ctx->compat)
			user_data += sizeof(struct compat_iovec);
		else
			user_data += sizeof(struct iovec);
	}
	return done ? done : err;
}

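/*
 * Common entry point for file and buffer updates: reject ranges that
 * overflow, then dispatch on the resource type.
 */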
static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;

	lockdep_assert_held(&ctx->uring_lock);

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}

int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv2)
		return -EINVAL;
	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}

int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	up->offset = READ_ONCE(sqe->off);
	up->nr_args = READ_ONCE(sqe->len);
	if (!up->nr_args)
		return -EINVAL;
	up->arg = READ_ONCE(sqe->addr);
	return 0;
}

static int io_files_update_with_index_alloc(struct io_kiocb *req,
					    unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	__s32 __user *fds = u64_to_user_ptr(up->arg);
	unsigned int done;
	struct file *file;
	int ret, fd;

	if (!req->ctx->file_data)
		return -ENXIO;

	for (done = 0; done < up->nr_args; done++) {
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			ret = -EFAULT;
			break;
		}

		file = fget(fd);
		if (!file) {
			ret = -EBADF;
			break;
		}
		ret = io_fixed_fd_install(req, issue_flags, file,
					  IORING_FILE_INDEX_ALLOC);
		if (ret < 0)
			break;
		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
			__io_close_fixed(req->ctx, issue_flags, ret);
			ret = -EFAULT;
			break;
		}
	}

	if (done)
		return done;
	return ret;
}

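/*
 * IORING_OP_FILES_UPDATE: update the registered file table from an SQE,
 * either at the given offsets or, with IORING_FILE_INDEX_ALLOC, into
 * freshly allocated slots.
 */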
int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update2 up2;
	int ret;

	up2.offset = up->offset;
	up2.data = up->arg;
	up2.nr = 0;
	up2.tags = 0;
	up2.resv = 0;
	up2.resv2 = 0;

	if (up->offset == IORING_FILE_INDEX_ALLOC) {
		ret = io_files_update_with_index_alloc(req, issue_flags);
	} else {
		io_ring_submit_lock(ctx, issue_flags);
		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
						&up2, up->nr_args);
		io_ring_submit_unlock(ctx, issue_flags);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc)
{
	struct io_ring_ctx *ctx = data->ctx;
	struct io_rsrc_node *node = ctx->rsrc_node;
	u64 *tag_slot = io_get_tag_slot(data, idx);

	ctx->rsrc_node = io_rsrc_node_alloc(ctx);
	if (unlikely(!ctx->rsrc_node)) {
		ctx->rsrc_node = node;
		return -ENOMEM;
	}

	node->item.rsrc = rsrc;
	node->type = data->rsrc_type;
	node->item.tag = *tag_slot;
	*tag_slot = 0;
	list_add_tail(&node->node, &ctx->rsrc_ref_list);
	io_put_rsrc_node(ctx, node);
	return 0;
}

void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file = io_file_from_index(&ctx->file_table, i);

		if (!file)
			continue;
		io_file_bitmap_clear(&ctx->file_table, i);
		fput(file);
	}

	io_free_file_tables(&ctx->file_table);
	io_file_table_set_alloc_range(ctx, 0, 0);
	io_rsrc_data_free(ctx->file_data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
}

int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_files;
	int ret;

	if (!ctx->file_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests from using the table.
	 */
	ctx->nr_user_files = 0;
	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
	ctx->nr_user_files = nr;
	if (!ret)
		__io_sqe_files_unregister(ctx);
	return ret;
}

int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags)
{
	__s32 __user *fds = (__s32 __user *) arg;
	struct file *file;
	int fd, ret;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;
	if (nr_args > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_FILE, tags, nr_args,
				 &ctx->file_data);
	if (ret)
		return ret;

	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
		io_rsrc_data_free(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct io_fixed_file *file_slot;

		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
			ret = -EFAULT;
			goto fail;
		}
		/* allow sparse sets */
		if (!fds || fd == -1) {
			ret = -EINVAL;
			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
				goto fail;
			continue;
		}

		file = fget(fd);
		ret = -EBADF;
		if (unlikely(!file))
			goto fail;

		/*
		 * Don't allow io_uring instances to be registered.
		 */
		if (io_is_uring_fops(file)) {
			fput(file);
			goto fail;
		}
		file_slot = io_fixed_file_slot(&ctx->file_table, i);
		io_fixed_file_set(file_slot, file);
		io_file_bitmap_set(&ctx->file_table, i);
	}

	/* default it to the whole table */
	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
	return 0;
fail:
	__io_sqe_files_unregister(ctx);
	return ret;
}

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	io_buffer_unmap(ctx, &prsrc->buf);
	prsrc->buf = NULL;
}

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned int i;

	for (i = 0; i < ctx->nr_user_bufs; i++)
		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
	kfree(ctx->user_bufs);
	io_rsrc_data_free(ctx->buf_data);
	ctx->user_bufs = NULL;
	ctx->buf_data = NULL;
	ctx->nr_user_bufs = 0;
}

int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_bufs;
	int ret;

	if (!ctx->buf_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests from using the table.
	 */
	ctx->nr_user_bufs = 0;
	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
	ctx->nr_user_bufs = nr;
	if (!ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

/*
 * Not super efficient, but this is only done at registration time. And we
 * do cache the last compound head, so generally we'll only do a full
 * search if we don't match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}

static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct page **pages = NULL;
	unsigned long off;
	size_t size;
	int ret, nr_pages, i;
	struct folio *folio = NULL;

	*pimu = (struct io_mapped_ubuf *)&dummy_ubuf;
	if (!iov->iov_base)
		return 0;

	ret = -ENOMEM;
	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
			     &nr_pages);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		goto done;
	}

	/* If it's a huge page, try to coalesce them into a single bvec entry */
	if (nr_pages > 1) {
		folio = page_folio(pages[0]);
		for (i = 1; i < nr_pages; i++) {
			/*
			 * Pages must be consecutive and on the same folio for
			 * this to work
			 */
			if (page_folio(pages[i]) != folio ||
			    pages[i] != pages[i - 1] + 1) {
				folio = NULL;
				break;
			}
		}
		if (folio) {
			/*
			 * The pages are bound to the folio, it doesn't
			 * actually unpin them but drops all but one reference,
			 * which is usually put down by io_buffer_unmap().
			 * Note, needs a better helper.
			 */
			unpin_user_pages(&pages[1], nr_pages - 1);
			nr_pages = 1;
		}
	}

	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		goto done;

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		goto done;
	}

	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
	size = iov->iov_len;
	/* store original address for later verification */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->ubuf_end = imu->ubuf + iov->iov_len;
	imu->nr_bvecs = nr_pages;
	*pimu = imu;
	ret = 0;

	if (folio) {
		bvec_set_page(&imu->bvec[0], pages[0], size, off);
		goto done;
	}
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
		off = 0;
		size -= vec_len;
	}
done:
	if (ret)
		kvfree(imu);
	kvfree(pages);
	return ret;
}

static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
	return ctx->user_bufs ? 0 : -ENOMEM;
}

int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data *data;
	struct iovec fast_iov, *iov = &fast_iov;
	const struct iovec __user *uvec;
	int i, ret;

	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_BUFFER, tags, nr_args, &data);
	if (ret)
		return ret;
	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret) {
		io_rsrc_data_free(data);
		return ret;
	}

	if (!arg)
		memset(iov, 0, sizeof(*iov));

	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
		if (arg) {
			uvec = (struct iovec __user *) arg;
			iov = iovec_from_user(uvec, 1, 1, &fast_iov, ctx->compat);
			if (IS_ERR(iov)) {
				ret = PTR_ERR(iov);
				break;
			}
			ret = io_buffer_validate(iov);
			if (ret)
				break;
			if (ctx->compat)
				arg += sizeof(struct compat_iovec);
			else
				arg += sizeof(struct iovec);
		}

		if (!iov->iov_base && *io_get_tag_slot(data, i)) {
			ret = -EINVAL;
			break;
		}

		ret = io_sqe_buffer_register(ctx, iov, &ctx->user_bufs[i],
					     &last_hpage);
		if (ret)
			break;
	}

	WARN_ON_ONCE(ctx->buf_data);

	ctx->buf_data = data;
	if (ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;

	/*
	 * Might not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec), just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset < bvec->bv_len) {
			/*
			 * Note, huge page buffers consist of one large
			 * bvec entry and should always go this way. The other
			 * branch doesn't expect non PAGE_SIZE'd chunks.
			 */
			iter->bvec = bvec;
			iter->count -= offset;
			iter->iov_offset = offset;
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}