// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "openclose.h"
#include "rsrc.h"

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage);

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 20)
#define IORING_MAX_REG_BUFFERS	(1U << 14)

int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	if (!nr_pages)
		return 0;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	cur_pages = atomic_long_read(&user->locked_vm);
	do {
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
					  &cur_pages, new_pages));
	return 0;
}

static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}

static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}

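/*
 * Sanity check a candidate fixed buffer before registration: a NULL base
 * is only accepted for an empty (sparse) entry, zero-length buffers are
 * rejected, the size is capped at 1GB, and the page-rounded range must not
 * wrap the address space.
 */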
static int io_buffer_validate(struct iovec *iov)
{
	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here, we'll -EINVAL later when IO is
	 * submitted if they are wrong.
	 */
	if (!iov->iov_base)
		return iov->iov_len ? -EFAULT : 0;
	if (!iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
		return -EOVERFLOW;

	return 0;
}

static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
{
	struct io_mapped_ubuf *imu = *slot;
	unsigned int i;

	if (imu != ctx->dummy_ubuf) {
		for (i = 0; i < imu->nr_bvecs; i++)
			unpin_user_page(imu->bvec[i].bv_page);
		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu);
	}
	*slot = NULL;
}

static void io_rsrc_put_work(struct io_rsrc_node *node)
{
	struct io_rsrc_put *prsrc = &node->item;

	if (prsrc->tag)
		io_post_aux_cqe(node->ctx, prsrc->tag, 0, 0);

	switch (node->type) {
	case IORING_RSRC_FILE:
		io_rsrc_file_put(node->ctx, prsrc);
		break;
	case IORING_RSRC_BUFFER:
		io_rsrc_buf_put(node->ctx, prsrc);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	if (!io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache))
		kfree(node);
}

void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
	__must_hold(&node->ctx->uring_lock)
{
	struct io_ring_ctx *ctx = node->ctx;

	while (!list_empty(&ctx->rsrc_ref_list)) {
		node = list_first_entry(&ctx->rsrc_ref_list,
					struct io_rsrc_node, node);
		/* recycle ref nodes in order */
		if (node->refs)
			break;
		list_del(&node->node);

		if (likely(!node->empty))
			io_rsrc_put_work(node);
		io_rsrc_node_destroy(ctx, node);
	}
	if (list_empty(&ctx->rsrc_ref_list) && unlikely(ctx->rsrc_quiesce))
		wake_up_all(&ctx->rsrc_quiesce_wq);
}

struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *ref_node;
	struct io_cache_entry *entry;

	entry = io_alloc_cache_get(&ctx->rsrc_node_cache);
	if (entry) {
		ref_node = container_of(entry, struct io_rsrc_node, cache);
	} else {
		ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
		if (!ref_node)
			return NULL;
	}

	ref_node->ctx = ctx;
	ref_node->empty = 0;
	ref_node->refs = 1;
	return ref_node;
}

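/*
 * Wait for all in-flight resource nodes to be dropped. The current node is
 * retired and replaced with a freshly allocated one, then we wait (dropping
 * ->uring_lock while sleeping) until ->rsrc_ref_list drains. Returns 0 once
 * the list is empty, -ENXIO if another quiesce is already in progress, or a
 * negative error if interrupted by a signal.
 */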
__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
				      struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *backup;
	DEFINE_WAIT(we);
	int ret;

	/* As we may drop ->uring_lock, another task may have started quiesce */
	if (data->quiesce)
		return -ENXIO;

	backup = io_rsrc_node_alloc(ctx);
	if (!backup)
		return -ENOMEM;
	ctx->rsrc_node->empty = true;
	ctx->rsrc_node->type = -1;
	list_add_tail(&ctx->rsrc_node->node, &ctx->rsrc_ref_list);
	io_put_rsrc_node(ctx, ctx->rsrc_node);
	ctx->rsrc_node = backup;

	if (list_empty(&ctx->rsrc_ref_list))
		return 0;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 1);
		smp_mb();
	}

	ctx->rsrc_quiesce++;
	data->quiesce = true;
	do {
		prepare_to_wait(&ctx->rsrc_quiesce_wq, &we, TASK_INTERRUPTIBLE);
		mutex_unlock(&ctx->uring_lock);

		ret = io_run_task_work_sig(ctx);
		if (ret < 0) {
			mutex_lock(&ctx->uring_lock);
			if (list_empty(&ctx->rsrc_ref_list))
				ret = 0;
			break;
		}

		schedule();
		__set_current_state(TASK_RUNNING);
		mutex_lock(&ctx->uring_lock);
		ret = 0;
	} while (!list_empty(&ctx->rsrc_ref_list));

	finish_wait(&ctx->rsrc_quiesce_wq, &we);
	data->quiesce = false;
	ctx->rsrc_quiesce--;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 0);
		smp_mb();
	}
	return ret;
}

static void io_free_page_table(void **table, size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);

	for (i = 0; i < nr_tables; i++)
		kfree(table[i]);
	kfree(table);
}

static void io_rsrc_data_free(struct io_rsrc_data *data)
{
	size_t size = data->nr * sizeof(data->tags[0][0]);

	if (data->tags)
		io_free_page_table((void **)data->tags, size);
	kfree(data);
}

static __cold void **io_alloc_page_table(size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
	size_t init_size = size;
	void **table;

	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
	if (!table)
		return NULL;

	for (i = 0; i < nr_tables; i++) {
		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);

		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
		if (!table[i]) {
			io_free_page_table(table, init_size);
			return NULL;
		}
		size -= this_size;
	}
	return table;
}

__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, int type,
				     u64 __user *utags,
				     unsigned nr, struct io_rsrc_data **pdata)
{
	struct io_rsrc_data *data;
	int ret = 0;
	unsigned i;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
	if (!data->tags) {
		kfree(data);
		return -ENOMEM;
	}

	data->nr = nr;
	data->ctx = ctx;
	data->rsrc_type = type;
	if (utags) {
		ret = -EFAULT;
		for (i = 0; i < nr; i++) {
			u64 *tag_slot = io_get_tag_slot(data, i);

			if (copy_from_user(tag_slot, &utags[i],
					   sizeof(*tag_slot)))
				goto fail;
		}
	}
	*pdata = data;
	return 0;
fail:
	io_rsrc_data_free(data);
	return ret;
}

static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	__s32 __user *fds = u64_to_user_ptr(up->data);
	struct io_rsrc_data *data = ctx->file_data;
	struct io_fixed_file *file_slot;
	struct file *file;
	int fd, i, err = 0;
	unsigned int done;

	if (!ctx->file_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_files)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		u64 tag = 0;

		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
			err = -EINVAL;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
		file_slot = io_fixed_file_slot(&ctx->file_table, i);

		if (file_slot->file_ptr) {
			file = (struct file *)(file_slot->file_ptr & FFS_MASK);
			err = io_queue_rsrc_removal(data, i, file);
			if (err)
				break;
			file_slot->file_ptr = 0;
			io_file_bitmap_clear(&ctx->file_table, i);
		}
		if (fd != -1) {
			file = fget(fd);
			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered. If
			 * UNIX isn't enabled, then this causes a reference
			 * cycle and this instance can never get freed. If UNIX
			 * is enabled we'll handle it just fine, but there's
			 * still no point in allowing a ring fd as it doesn't
			 * support regular read/write anyway.
			 */
			if (io_is_uring_fops(file)) {
				fput(file);
				err = -EBADF;
				break;
			}
			err = io_scm_file_account(ctx, file);
			if (err) {
				fput(file);
				break;
			}
			*io_get_tag_slot(data, i) = tag;
			io_fixed_file_set(file_slot, file);
			io_file_bitmap_set(&ctx->file_table, i);
		}
	}
	return done ? done : err;
}

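/*
 * Update a range of registered buffers in place: each new iovec is validated
 * and pinned first, then the old buffer in that slot (if any) is queued for
 * deferred teardown via the rsrc node before the slot and its tag are
 * switched over to the new mapping.
 */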
static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
	struct page *last_hpage = NULL;
	__u32 done;
	int i, err;

	if (!ctx->buf_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_bufs)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_mapped_ubuf *imu;
		u64 tag = 0;

		err = io_copy_iov(ctx, &iov, iovs, done);
		if (err)
			break;
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(&iov);
		if (err)
			break;
		if (!iov.iov_base && tag) {
			err = -EINVAL;
			break;
		}
		err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
		if (err)
			break;

		i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
		if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
			err = io_queue_rsrc_removal(ctx->buf_data, i,
						    ctx->user_bufs[i]);
			if (unlikely(err)) {
				io_buffer_unmap(ctx, &imu);
				break;
			}
			ctx->user_bufs[i] = ctx->dummy_ubuf;
		}

		ctx->user_bufs[i] = imu;
		*io_get_tag_slot(ctx->buf_data, i) = tag;
	}
	return done ? done : err;
}

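/*
 * Common entry point for file and buffer updates: checks that offset +
 * nr_args doesn't overflow and dispatches on the resource type.
 */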
static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;

	lockdep_assert_held(&ctx->uring_lock);

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}

int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv2)
		return -EINVAL;
	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}

int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	up->offset = READ_ONCE(sqe->off);
	up->nr_args = READ_ONCE(sqe->len);
	if (!up->nr_args)
		return -EINVAL;
	up->arg = READ_ONCE(sqe->addr);
	return 0;
}

static int io_files_update_with_index_alloc(struct io_kiocb *req,
					    unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	__s32 __user *fds = u64_to_user_ptr(up->arg);
	unsigned int done;
	struct file *file;
	int ret, fd;

	if (!req->ctx->file_data)
		return -ENXIO;

	for (done = 0; done < up->nr_args; done++) {
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			ret = -EFAULT;
			break;
		}

		file = fget(fd);
		if (!file) {
			ret = -EBADF;
			break;
		}
		ret = io_fixed_fd_install(req, issue_flags, file,
					  IORING_FILE_INDEX_ALLOC);
		if (ret < 0)
			break;
		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
			__io_close_fixed(req->ctx, issue_flags, ret);
			ret = -EFAULT;
			break;
		}
	}

	if (done)
		return done;
	return ret;
}

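/*
 * IORING_OP_FILES_UPDATE issue path. With IORING_FILE_INDEX_ALLOC as the
 * offset, slots are allocated for the caller and the chosen indices are
 * copied back; otherwise the request is translated into a regular files
 * update at the given offset.
 */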
int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update2 up2;
	int ret;

	up2.offset = up->offset;
	up2.data = up->arg;
	up2.nr = 0;
	up2.tags = 0;
	up2.resv = 0;
	up2.resv2 = 0;

	if (up->offset == IORING_FILE_INDEX_ALLOC) {
		ret = io_files_update_with_index_alloc(req, issue_flags);
	} else {
		io_ring_submit_lock(ctx, issue_flags);
		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
						&up2, up->nr_args);
		io_ring_submit_unlock(ctx, issue_flags);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

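/*
 * Queue a resource for deferred destruction. The current rsrc node takes
 * ownership of @rsrc (and its tag) and is parked on ->rsrc_ref_list; a new
 * node is installed for subsequent requests. The resource is only torn down
 * once every request referencing the old node has dropped its reference.
 */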
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc)
{
	struct io_ring_ctx *ctx = data->ctx;
	struct io_rsrc_node *node = ctx->rsrc_node;
	u64 *tag_slot = io_get_tag_slot(data, idx);

	ctx->rsrc_node = io_rsrc_node_alloc(ctx);
	if (unlikely(!ctx->rsrc_node)) {
		ctx->rsrc_node = node;
		return -ENOMEM;
	}

	node->item.rsrc = rsrc;
	node->type = data->rsrc_type;
	node->item.tag = *tag_slot;
	*tag_slot = 0;
	list_add_tail(&node->node, &ctx->rsrc_ref_list);
	io_put_rsrc_node(ctx, node);
	return 0;
}

void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file = io_file_from_index(&ctx->file_table, i);

		/* skip scm accounted files, they'll be freed by ->ring_sock */
		if (!file || io_file_need_scm(file))
			continue;
		io_file_bitmap_clear(&ctx->file_table, i);
		fput(file);
	}

#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		struct sock *sock = ctx->ring_sock->sk;
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
			kfree_skb(skb);
	}
#endif
	io_free_file_tables(&ctx->file_table);
	io_file_table_set_alloc_range(ctx, 0, 0);
	io_rsrc_data_free(ctx->file_data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
}

int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_files;
	int ret;

	if (!ctx->file_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests using the table.
	 */
	ctx->nr_user_files = 0;
	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
	ctx->nr_user_files = nr;
	if (!ret)
		__io_sqe_files_unregister(ctx);
	return ret;
}

/*
 * Ensure the UNIX gc is aware of our file set, so we are certain that
 * the io_uring can be safely unregistered on process exit, even if we have
 * loops in the file referencing. We account only files that can hold other
 * files because otherwise they can't form a loop and so are not interesting
 * for GC.
 */
int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
{
#if defined(CONFIG_UNIX)
	struct sock *sk = ctx->ring_sock->sk;
	struct sk_buff_head *head = &sk->sk_receive_queue;
	struct scm_fp_list *fpl;
	struct sk_buff *skb;

	if (likely(!io_file_need_scm(file)))
		return 0;

	/*
	 * See if we can merge this file into an existing skb SCM_RIGHTS
	 * file set. If there's no room, fall back to allocating a new skb
	 * and filling it in.
	 */
	spin_lock_irq(&head->lock);
	skb = skb_peek(head);
	if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
		__skb_unlink(skb, head);
	else
		skb = NULL;
	spin_unlock_irq(&head->lock);

	if (!skb) {
		fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
		if (!fpl)
			return -ENOMEM;

		skb = alloc_skb(0, GFP_KERNEL);
		if (!skb) {
			kfree(fpl);
			return -ENOMEM;
		}

		fpl->user = get_uid(current_user());
		fpl->max = SCM_MAX_FD;
		fpl->count = 0;

		UNIXCB(skb).fp = fpl;
		skb->sk = sk;
		skb->destructor = io_uring_destruct_scm;
		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
	}

	fpl = UNIXCB(skb).fp;
	fpl->fp[fpl->count++] = get_file(file);
	unix_inflight(fpl->user, file);
	skb_queue_head(head, skb);
	fput(file);
#endif
	return 0;
}

static __cold void io_rsrc_file_scm_put(struct io_ring_ctx *ctx, struct file *file)
{
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head list, *head = &sock->sk_receive_queue;
	struct sk_buff *skb;
	int i;

	__skb_queue_head_init(&list);

	/*
	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
	 * remove this entry and rearrange the file array.
	 */
	skb = skb_dequeue(head);
	while (skb) {
		struct scm_fp_list *fp;

		fp = UNIXCB(skb).fp;
		for (i = 0; i < fp->count; i++) {
			int left;

			if (fp->fp[i] != file)
				continue;

			unix_notinflight(fp->user, fp->fp[i]);
			left = fp->count - 1 - i;
			if (left) {
				memmove(&fp->fp[i], &fp->fp[i + 1],
					left * sizeof(struct file *));
			}
			fp->count--;
			if (!fp->count) {
				kfree_skb(skb);
				skb = NULL;
			} else {
				__skb_queue_tail(&list, skb);
			}
			fput(file);
			file = NULL;
			break;
		}

		if (!file)
			break;

		__skb_queue_tail(&list, skb);

		skb = skb_dequeue(head);
	}

	if (skb_peek(&list)) {
		spin_lock_irq(&head->lock);
		while ((skb = __skb_dequeue(&list)) != NULL)
			__skb_queue_tail(head, skb);
		spin_unlock_irq(&head->lock);
	}
#endif
}

static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	struct file *file = prsrc->file;

	if (likely(!io_file_need_scm(file)))
		fput(file);
	else
		io_rsrc_file_scm_put(ctx, file);
}

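/*
 * Register an array of file descriptors as the ring's fixed file table.
 * An fd of -1 (or a NULL fd array) leaves the slot sparse, which is only
 * valid if no tag was supplied for it. Ring fds are rejected, and files
 * that can hold other files are accounted with the UNIX gc via SCM_RIGHTS.
 */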
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags)
{
	__s32 __user *fds = (__s32 __user *) arg;
	struct file *file;
	int fd, ret;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;
	if (nr_args > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_FILE, tags, nr_args,
				 &ctx->file_data);
	if (ret)
		return ret;

	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
		io_rsrc_data_free(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct io_fixed_file *file_slot;

		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
			ret = -EFAULT;
			goto fail;
		}
		/* allow sparse sets */
		if (!fds || fd == -1) {
			ret = -EINVAL;
			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
				goto fail;
			continue;
		}

		file = fget(fd);
		ret = -EBADF;
		if (unlikely(!file))
			goto fail;

		/*
		 * Don't allow io_uring instances to be registered. If UNIX
		 * isn't enabled, then this causes a reference cycle and this
		 * instance can never get freed. If UNIX is enabled we'll
		 * handle it just fine, but there's still no point in allowing
		 * a ring fd as it doesn't support regular read/write anyway.
		 */
		if (io_is_uring_fops(file)) {
			fput(file);
			goto fail;
		}
		ret = io_scm_file_account(ctx, file);
		if (ret) {
			fput(file);
			goto fail;
		}
		file_slot = io_fixed_file_slot(&ctx->file_table, i);
		io_fixed_file_set(file_slot, file);
		io_file_bitmap_set(&ctx->file_table, i);
	}

	/* default it to the whole table */
	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
	return 0;
fail:
	__io_sqe_files_unregister(ctx);
	return ret;
}

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	io_buffer_unmap(ctx, &prsrc->buf);
	prsrc->buf = NULL;
}

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned int i;

	for (i = 0; i < ctx->nr_user_bufs; i++)
		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
	kfree(ctx->user_bufs);
	io_rsrc_data_free(ctx->buf_data);
	ctx->user_bufs = NULL;
	ctx->buf_data = NULL;
	ctx->nr_user_bufs = 0;
}

int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_bufs;
	int ret;

	if (!ctx->buf_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests using the table.
	 */
	ctx->nr_user_bufs = 0;
	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
	ctx->nr_user_bufs = nr;
	if (!ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

/*
 * Not super efficient, but this is just done at registration time. And we
 * do cache the last compound head, so generally we'll only do a full search
 * if we don't match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

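/*
 * Charge the pinned pages against the user's locked-memory (RLIMIT_MEMLOCK)
 * accounting. Normal pages count one each; for a compound (huge) page the
 * whole head page is charged once, using headpage_already_acct() to avoid
 * charging the same huge page twice across buffers.
 */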
static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}

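/*
 * Pin the user pages backing [ubuf, ubuf + len) for long-term use. File
 * backed mappings (other than shmem and hugetlbfs) are rejected. Returns
 * the page array and stores the page count in *npages, or an ERR_PTR on
 * failure with any partially pinned pages released.
 */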
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
{
	unsigned long start, end, nr_pages;
	struct vm_area_struct **vmas = NULL;
	struct page **pages = NULL;
	int i, pret, ret = -ENOMEM;

	end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ubuf >> PAGE_SHIFT;
	nr_pages = end - start;

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto done;

	vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
			      GFP_KERNEL);
	if (!vmas)
		goto done;

	ret = 0;
	mmap_read_lock(current->mm);
	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
			      pages, vmas);
	if (pret == nr_pages) {
		/* don't support file backed memory */
		for (i = 0; i < nr_pages; i++) {
			struct vm_area_struct *vma = vmas[i];

			if (vma_is_shmem(vma))
				continue;
			if (vma->vm_file &&
			    !is_file_hugepages(vma->vm_file)) {
				ret = -EOPNOTSUPP;
				break;
			}
		}
		*npages = nr_pages;
	} else {
		ret = pret < 0 ? pret : -EFAULT;
	}
	mmap_read_unlock(current->mm);
	if (ret) {
		/*
		 * if we did partial map, or found file backed vmas,
		 * release any pages we did get
		 */
		if (pret > 0)
			unpin_user_pages(pages, pret);
		goto done;
	}
	ret = 0;
done:
	kvfree(vmas);
	if (ret < 0) {
		kvfree(pages);
		pages = ERR_PTR(ret);
	}
	return pages;
}

static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct page **pages = NULL;
	unsigned long off;
	size_t size;
	int ret, nr_pages, i;
	struct folio *folio = NULL;

	*pimu = ctx->dummy_ubuf;
	if (!iov->iov_base)
		return 0;

	ret = -ENOMEM;
	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
			     &nr_pages);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		goto done;
	}

	/* If it's a huge page, try to coalesce them into a single bvec entry */
	if (nr_pages > 1) {
		folio = page_folio(pages[0]);
		for (i = 1; i < nr_pages; i++) {
			/*
			 * Pages must be consecutive and on the same folio for
			 * this to work
			 */
			if (page_folio(pages[i]) != folio ||
			    pages[i] != pages[i - 1] + 1) {
				folio = NULL;
				break;
			}
		}
		if (folio) {
			/*
			 * The pages are bound to the folio, it doesn't
			 * actually unpin them but drops all but one reference,
			 * which is usually put down by io_buffer_unmap().
			 * Note, needs a better helper.
			 */
			unpin_user_pages(&pages[1], nr_pages - 1);
			nr_pages = 1;
		}
	}

	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		goto done;

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		goto done;
	}

	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
	size = iov->iov_len;
	/* store original address for later verification */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->ubuf_end = imu->ubuf + iov->iov_len;
	imu->nr_bvecs = nr_pages;
	*pimu = imu;
	ret = 0;

	if (folio) {
		bvec_set_page(&imu->bvec[0], pages[0], size, off);
		goto done;
	}
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
		off = 0;
		size -= vec_len;
	}
done:
	if (ret)
		kvfree(imu);
	kvfree(pages);
	return ret;
}

static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
	return ctx->user_bufs ? 0 : -ENOMEM;
}

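/*
 * Register an array of user buffers as fixed buffers. A NULL iovec array
 * (or an entry with a NULL base) creates a sparse slot, which must not
 * carry a tag. Each buffer is pinned, accounted and described by an
 * io_mapped_ubuf so later fixed reads/writes can skip per-IO page pinning.
 */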
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data *data;
	int i, ret;
	struct iovec iov;

	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_BUFFER, tags, nr_args, &data);
	if (ret)
		return ret;
	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret) {
		io_rsrc_data_free(data);
		return ret;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
		if (arg) {
			ret = io_copy_iov(ctx, &iov, arg, i);
			if (ret)
				break;
			ret = io_buffer_validate(&iov);
			if (ret)
				break;
		} else {
			memset(&iov, 0, sizeof(iov));
		}

		if (!iov.iov_base && *io_get_tag_slot(data, i)) {
			ret = -EINVAL;
			break;
		}

		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
					     &last_hpage);
		if (ret)
			break;
	}

	WARN_ON_ONCE(ctx->buf_data);

	ctx->buf_data = data;
	if (ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;

	/*
	 * Might not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec), just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			/*
			 * Note, huge page buffers consist of one large
			 * bvec entry and should always go this way. The other
			 * branch doesn't expect non PAGE_SIZE'd chunks.
			 */
			iter->bvec = bvec;
			iter->nr_segs = bvec->bv_len;
			iter->count -= offset;
			iter->iov_offset = offset;
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}