// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "memmap.h"

/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)

struct kmem_cache *io_buf_cachep;

struct io_provide_buf {
	struct file	*file;
	__u64		addr;
	__u32		len;
	__u32		bgid;
	__u32		nbufs;
	__u16		bid;
};

static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							unsigned int bgid)
{
	lockdep_assert_held(&ctx->uring_lock);

	return xa_load(&ctx->io_bl_xa, bgid);
}

static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	/*
	 * Store buffer group ID and finally mark the list as visible.
	 * The normal lookup doesn't care about the visibility as we're
	 * always under the ->uring_lock, but the RCU lookup from mmap does.
	 */
	bl->bgid = bgid;
	atomic_set(&bl->refs, 1);
	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	req->buf_index = buf->bgid;

	io_ring_submit_unlock(ctx, issue_flags);
	return true;
}
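/*
 * Illustrative userspace-side sketch (not part of this file): the bid a
 * request ends up selecting is reported back in the CQE, which is why
 * MAX_BIDS_PER_BGID above is capped at 1 << 16. The helper name
 * consume_cqe() is made up for illustration:
 *
 *	static void consume_cqe(struct io_uring_cqe *cqe)
 *	{
 *		if (cqe->flags & IORING_CQE_F_BUFFER) {
 *			unsigned short bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 *
 *			// data for this completion landed in buffer 'bid' of
 *			// the group the request was submitted against
 *		}
 *	}
 */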
void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
{
	/*
	 * We can add this buffer back to two lists:
	 *
	 * 1) The io_buffers_cache list. This one is protected by the
	 *    ctx->uring_lock. If we already hold this lock, add back to this
	 *    list as we can grab it from issue as well.
	 * 2) The io_buffers_comp list. This one is protected by the
	 *    ctx->completion_lock.
	 *
	 * We migrate buffers from the comp_list to the issue cache list
	 * when we need one.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock(&ctx->completion_lock);
		__io_put_kbuf_list(req, &ctx->io_buffers_comp);
		spin_unlock(&ctx->completion_lock);
	} else {
		lockdep_assert_held(&req->ctx->uring_lock);

		__io_put_kbuf_list(req, &req->ctx->io_buffers_cache);
	}
}

static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		if (list_empty(&bl->buf_list))
			req->flags |= REQ_F_BL_EMPTY;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}

static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
				      struct io_buffer_list *bl,
				      struct iovec *iov)
{
	void __user *buf;

	buf = io_provided_buffer_select(req, len, bl);
	if (unlikely(!buf))
		return -ENOBUFS;

	iov[0].iov_base = buf;
	iov[0].iov_len = *len;
	return 0;
}

static struct io_uring_buf *io_ring_head_to_buf(struct io_uring_buf_ring *br,
						__u16 head, __u16 mask)
{
	return &br->bufs[head & mask];
}
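/*
 * Illustrative sketch of the ring indexing used below (not part of this
 * file): the ring has a power-of-2 number of entries, so 'head & mask'
 * wraps without a modulo. The kernel only advances the head; userspace
 * publishes new buffers by filling bufs[tail & mask] and then advancing
 * the tail with a release store, e.g. via liburing's helpers:
 *
 *	io_uring_buf_ring_add(br, addr, len, bid, mask, 0);
 *	io_uring_buf_ring_advance(br, 1);
 *
 * That release of the tail pairs with the smp_load_acquire() of br->tail
 * in io_ring_buffer_select() and io_ring_buffers_peek().
 */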
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	__u16 tail, head = bl->head;
	struct io_uring_buf *buf;

	tail = smp_load_acquire(&br->tail);
	if (unlikely(tail == head))
		return NULL;

	if (head + 1 == tail)
		req->flags |= REQ_F_BL_EMPTY;

	buf = io_ring_head_to_buf(br, head, bl->mask);
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
	req->buf_list = bl;
	req->buf_index = buf->bid;

	if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here, otherwise nothing ensures that the buffer won't
		 * get used by others. This does mean it'll be pinned until the
		 * IO completes, coming in unlocked means we're being called from
		 * io-wq context and there may be further retries in async hybrid
		 * mode. For the locked case, the caller must call commit when
		 * the transfer completes (or if we get -EAGAIN and must poll or
		 * retry).
		 */
		req->flags &= ~REQ_F_BUFFERS_COMMIT;
		req->buf_list = NULL;
		bl->head++;
	}
	return u64_to_user_ptr(buf->addr);
}

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(req->ctx, issue_flags);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (likely(bl)) {
		if (bl->is_buf_ring)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(req->ctx, issue_flags);
	return ret;
}

/* cap it at a reasonable 256, will be one page even for 4K */
#define PEEK_MAX_IMPORT		256

static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
				struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct iovec *iov = arg->iovs;
	int nr_iovs = arg->nr_iovs;
	__u16 nr_avail, tail, head;
	struct io_uring_buf *buf;

	tail = smp_load_acquire(&br->tail);
	head = bl->head;
	nr_avail = min_t(__u16, tail - head, UIO_MAXIOV);
	if (unlikely(!nr_avail))
		return -ENOBUFS;

	buf = io_ring_head_to_buf(br, head, bl->mask);
	if (arg->max_len) {
		u32 len = READ_ONCE(buf->len);
		size_t needed;

		if (unlikely(!len))
			return -ENOBUFS;
		needed = (arg->max_len + len - 1) / len;
		needed = min_not_zero(needed, (size_t) PEEK_MAX_IMPORT);
		if (nr_avail > needed)
			nr_avail = needed;
	}

	/*
	 * only alloc a bigger array if we know we have data to map, eg not
	 * a speculative peek operation.
	 */
	if (arg->mode & KBUF_MODE_EXPAND && nr_avail > nr_iovs && arg->max_len) {
		iov = kmalloc_array(nr_avail, sizeof(struct iovec), GFP_KERNEL);
		if (unlikely(!iov))
			return -ENOMEM;
		if (arg->mode & KBUF_MODE_FREE)
			kfree(arg->iovs);
		arg->iovs = iov;
		nr_iovs = nr_avail;
	} else if (nr_avail < nr_iovs) {
		nr_iovs = nr_avail;
	}

	/* set it to max, if not set, so we can use it unconditionally */
	if (!arg->max_len)
		arg->max_len = INT_MAX;

	req->buf_index = buf->bid;
	do {
		/* truncate end piece, if needed */
		if (buf->len > arg->max_len)
			buf->len = arg->max_len;

		iov->iov_base = u64_to_user_ptr(buf->addr);
		iov->iov_len = buf->len;
		iov++;

		arg->out_len += buf->len;
		arg->max_len -= buf->len;
		if (!arg->max_len)
			break;

		buf = io_ring_head_to_buf(br, ++head, bl->mask);
	} while (--nr_iovs);

	if (head == tail)
		req->flags |= REQ_F_BL_EMPTY;

	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	return iov - arg->iovs;
}
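/*
 * Worked example for the sizing logic in io_ring_buffers_peek() above
 * (illustrative numbers only): with arg->max_len = 8192 and ring buffers
 * of len = 4096, needed = (8192 + 4096 - 1) / 4096 = 2, so at most two
 * iovecs are filled even if more buffers are posted. PEEK_MAX_IMPORT caps
 * the count when max_len is very large relative to the buffer size, and
 * the final iovec is truncated so the total never exceeds max_len.
 */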
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = -ENOENT;

	io_ring_submit_lock(ctx, issue_flags);
	bl = io_buffer_get_list(ctx, req->buf_index);
	if (unlikely(!bl))
		goto out_unlock;

	if (bl->is_buf_ring) {
		ret = io_ring_buffers_peek(req, arg, bl);
		/*
		 * Don't recycle these buffers if we need to go through poll.
		 * Nobody else can use them anyway, and holding on to provided
		 * buffers for a send/write operation would happen on the app
		 * side anyway with normal buffers. Besides, we already
		 * committed them, they cannot be put back in the queue.
		 */
		if (ret > 0) {
			req->flags |= REQ_F_BL_NO_RECYCLE;
			req->buf_list->head += ret;
		}
	} else {
		ret = io_provided_buffers_select(req, &arg->out_len, bl, arg->iovs);
	}
out_unlock:
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	bl = io_buffer_get_list(ctx, req->buf_index);
	if (unlikely(!bl))
		return -ENOENT;

	if (bl->is_buf_ring) {
		ret = io_ring_buffers_peek(req, arg, bl);
		if (ret > 0)
			req->flags |= REQ_F_BUFFERS_COMMIT;
		return ret;
	}

	/* don't support multiple buffer selections for legacy */
	return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs);
}

static int __io_remove_buffers(struct io_ring_ctx *ctx,
			       struct io_buffer_list *bl, unsigned nbufs)
{
	unsigned i = 0;

	/* shouldn't happen */
	if (!nbufs)
		return 0;

	if (bl->is_buf_ring) {
		i = bl->buf_ring->tail - bl->head;
		if (bl->buf_nr_pages) {
			int j;

			if (!bl->is_mmap) {
				for (j = 0; j < bl->buf_nr_pages; j++)
					unpin_user_page(bl->buf_pages[j]);
			}
			io_pages_unmap(bl->buf_ring, &bl->buf_pages,
				       &bl->buf_nr_pages, bl->is_mmap);
			bl->is_mmap = 0;
		}
		/* make sure it's seen as empty */
		INIT_LIST_HEAD(&bl->buf_list);
		bl->is_buf_ring = 0;
		return i;
	}

	/* protects io_buffers_cache */
	lockdep_assert_held(&ctx->uring_lock);

	while (!list_empty(&bl->buf_list)) {
		struct io_buffer *nxt;

		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_move(&nxt->list, &ctx->io_buffers_cache);
		if (++i == nbufs)
			return i;
		cond_resched();
	}

	return i;
}

void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	if (atomic_dec_and_test(&bl->refs)) {
		__io_remove_buffers(ctx, bl, -1U);
		kfree_rcu(bl, rcu);
	}
}

void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;
	struct list_head *item, *tmp;
	struct io_buffer *buf;
	unsigned long index;

	xa_for_each(&ctx->io_bl_xa, index, bl) {
		xa_erase(&ctx->io_bl_xa, bl->bgid);
		io_put_bl(ctx, bl);
	}

	/*
	 * Move deferred locked entries to cache before pruning
	 */
	spin_lock(&ctx->completion_lock);
	if (!list_empty(&ctx->io_buffers_comp))
		list_splice_init(&ctx->io_buffers_comp, &ctx->io_buffers_cache);
	spin_unlock(&ctx->completion_lock);

	list_for_each_safe(item, tmp, &ctx->io_buffers_cache) {
		buf = list_entry(item, struct io_buffer, list);
		kmem_cache_free(io_buf_cachep, buf);
	}
}

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}
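/*
 * Userspace-side sketch of what feeds the prep/issue pair below (not part
 * of this file, assuming liburing): removing up to 16 classic provided
 * buffers from group 7:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_remove_buffers(sqe, 16, 7);
 *	io_uring_submit(&ring);
 *
 * The CQE res then carries how many buffers were actually removed,
 * mirroring the return value of __io_remove_buffers().
 */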
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	ret = -ENOENT;
	bl = io_buffer_get_list(ctx, p->bgid);
	if (bl) {
		ret = -EINVAL;
		/* can't use provide/remove buffers command on mapped buffers */
		if (!bl->is_buf_ring)
			ret = __io_remove_buffers(ctx, bl, p->nbufs);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
			       &size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;

	size = (unsigned long)p->len * p->nbufs;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
		return -EINVAL;
	p->bid = tmp;
	return 0;
}

#define IO_BUFFER_ALLOC_BATCH 64
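/*
 * Userspace-side sketch for the provide path below (not part of this file,
 * assuming liburing, with 'base' standing in for an application buffer of
 * at least 32 * 4096 bytes): handing 32 contiguous 4KiB chunks to group 7,
 * starting at bid 0:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_provide_buffers(sqe, base, 4096, 32, 7, 0);
 *	io_uring_submit(&ring);
 *
 * io_provide_buffers_prep() above validates exactly this layout: nbufs in
 * sqe->fd, addr/len as given, bgid in sqe->buf_group and the starting bid
 * in sqe->off.
 */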
static int io_refill_buffer_cache(struct io_ring_ctx *ctx)
{
	struct io_buffer *bufs[IO_BUFFER_ALLOC_BATCH];
	int allocated;

	/*
	 * Completions that don't happen inline (eg not under uring_lock) will
	 * add to ->io_buffers_comp. If we don't have any free buffers, check
	 * the completion list and splice those entries first.
	 */
	if (!list_empty_careful(&ctx->io_buffers_comp)) {
		spin_lock(&ctx->completion_lock);
		if (!list_empty(&ctx->io_buffers_comp)) {
			list_splice_init(&ctx->io_buffers_comp,
					 &ctx->io_buffers_cache);
			spin_unlock(&ctx->completion_lock);
			return 0;
		}
		spin_unlock(&ctx->completion_lock);
	}

	/*
	 * No free buffers and no completion entries either. Allocate a new
	 * batch of buffer entries and add those to our freelist.
	 */

	allocated = kmem_cache_alloc_bulk(io_buf_cachep, GFP_KERNEL_ACCOUNT,
					  ARRAY_SIZE(bufs), (void **) bufs);
	if (unlikely(!allocated)) {
		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		bufs[0] = kmem_cache_alloc(io_buf_cachep, GFP_KERNEL);
		if (!bufs[0])
			return -ENOMEM;
		allocated = 1;
	}

	while (allocated)
		list_add_tail(&bufs[--allocated]->list, &ctx->io_buffers_cache);

	return 0;
}

static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		if (list_empty(&ctx->io_buffers_cache) &&
		    io_refill_buffer_cache(ctx))
			break;
		buf = list_first_entry(&ctx->io_buffers_cache, struct io_buffer,
				       list);
		list_move_tail(&buf->list, &bl->buf_list);
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		cond_resched();
	}

	return i ? 0 : -ENOMEM;
}

int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = 0;

	io_ring_submit_lock(ctx, issue_flags);

	bl = io_buffer_get_list(ctx, p->bgid);
	if (unlikely(!bl)) {
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl) {
			ret = -ENOMEM;
			goto err;
		}
		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(ctx, bl, p->bgid);
		if (ret) {
			/*
			 * Doesn't need rcu free as it was never visible, but
			 * let's keep it consistent throughout.
			 */
			kfree_rcu(bl, rcu);
			goto err;
		}
	}
	/* can't add buffers via this command for a mapped buffer ring */
	if (bl->is_buf_ring) {
		ret = -EINVAL;
		goto err;
	}

	ret = io_add_buffers(ctx, p, bl);
err:
	io_ring_submit_unlock(ctx, issue_flags);

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
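/*
 * Sketch of the registration that reaches io_register_pbuf_ring() below
 * (not part of this file): the application fills a struct io_uring_buf_reg
 * and passes it through io_uring_register(2). liburing wraps this in
 * io_uring_register_buf_ring()/io_uring_setup_buf_ring(); the raw form is
 * roughly:
 *
 *	struct io_uring_buf_reg reg = {
 *		.ring_addr	= (unsigned long) ring_mem,	// page aligned
 *		.ring_entries	= 64,				// power of 2
 *		.bgid		= 7,
 *	};
 *
 *	io_uring_register(ring_fd, IORING_REGISTER_PBUF_RING, &reg, 1);
 *
 * With IOU_PBUF_RING_MMAP set instead, ring_addr is left at 0 and the ring
 * is allocated by the kernel (io_alloc_pbuf_ring()) and mmap'ed afterwards.
 */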
static int io_pin_pbuf_ring(struct io_uring_buf_reg *reg,
			    struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br = NULL;
	struct page **pages;
	int nr_pages, ret;

	pages = io_pin_pages(reg->ring_addr,
			     flex_array_size(br, bufs, reg->ring_entries),
			     &nr_pages);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	br = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	if (!br) {
		ret = -ENOMEM;
		goto error_unpin;
	}

#ifdef SHM_COLOUR
	/*
	 * On platforms that have specific aliasing requirements, SHM_COLOUR
	 * is set and we must guarantee that the kernel and user side align
	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
	 * the application mmap's the provided ring buffer. Fail the request
	 * if we, by chance, don't end up with aligned addresses. The app
	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
	 * this transparently.
	 */
	if ((reg->ring_addr | (unsigned long) br) & (SHM_COLOUR - 1)) {
		ret = -EINVAL;
		goto error_unpin;
	}
#endif
	bl->buf_pages = pages;
	bl->buf_nr_pages = nr_pages;
	bl->buf_ring = br;
	bl->is_buf_ring = 1;
	bl->is_mmap = 0;
	return 0;
error_unpin:
	unpin_user_pages(pages, nr_pages);
	kvfree(pages);
	vunmap(br);
	return ret;
}

static int io_alloc_pbuf_ring(struct io_ring_ctx *ctx,
			      struct io_uring_buf_reg *reg,
			      struct io_buffer_list *bl)
{
	size_t ring_size;

	ring_size = reg->ring_entries * sizeof(struct io_uring_buf_ring);

	bl->buf_ring = io_pages_map(&bl->buf_pages, &bl->buf_nr_pages, ring_size);
	if (IS_ERR(bl->buf_ring)) {
		bl->buf_ring = NULL;
		return -ENOMEM;
	}

	bl->is_buf_ring = 1;
	bl->is_mmap = 1;
	return 0;
}

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl, *free_bl = NULL;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags & ~IOU_PBUF_RING_MMAP)
		return -EINVAL;
	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
		if (!reg.ring_addr)
			return -EFAULT;
		if (reg.ring_addr & ~PAGE_MASK)
			return -EINVAL;
	} else {
		if (reg.ring_addr)
			return -EINVAL;
	}

	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;

	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= 65536)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->is_buf_ring || !list_empty(&bl->buf_list))
			return -EEXIST;
	} else {
		free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
		if (!bl)
			return -ENOMEM;
	}

	if (!(reg.flags & IOU_PBUF_RING_MMAP))
		ret = io_pin_pbuf_ring(&reg, bl);
	else
		ret = io_alloc_pbuf_ring(ctx, &reg, bl);

	if (!ret) {
		bl->nr_entries = reg.ring_entries;
		bl->mask = reg.ring_entries - 1;

		io_buffer_add_list(ctx, bl, reg.bgid);
		return 0;
	}

	kfree_rcu(free_bl, rcu);
	return ret;
}

int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (reg.resv[0] || reg.resv[1] || reg.resv[2])
		return -EINVAL;
	if (reg.flags)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!bl->is_buf_ring)
		return -EINVAL;

	xa_erase(&ctx->io_bl_xa, bl->bgid);
	io_put_bl(ctx, bl);
	return 0;
}
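/*
 * Sketch of the query served by io_register_pbuf_status() below (not part
 * of this file): userspace can ask where the kernel-side head currently is
 * for a buffer group, mainly useful when debugging ring consumption:
 *
 *	struct io_uring_buf_status st = { .buf_group = 7 };
 *
 *	io_uring_register(ring_fd, IORING_REGISTER_PBUF_STATUS, &st, 1);
 *	// st.head now holds the kernel-side head for bgid 7
 */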
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_status buf_status;
	struct io_buffer_list *bl;
	int i;

	if (copy_from_user(&buf_status, arg, sizeof(buf_status)))
		return -EFAULT;

	for (i = 0; i < ARRAY_SIZE(buf_status.resv); i++)
		if (buf_status.resv[i])
			return -EINVAL;

	bl = io_buffer_get_list(ctx, buf_status.buf_group);
	if (!bl)
		return -ENOENT;
	if (!bl->is_buf_ring)
		return -EINVAL;

	buf_status.head = bl->head;
	if (copy_to_user(arg, &buf_status, sizeof(buf_status)))
		return -EFAULT;

	return 0;
}

struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid)
{
	struct io_buffer_list *bl;
	bool ret;

	/*
	 * We have to be a bit careful here - we're inside mmap and cannot grab
	 * the uring_lock. This means the buffer_list could be simultaneously
	 * going away, if someone is trying to be sneaky. Look it up under rcu
	 * so we know it's not going away, and attempt to grab a reference to
	 * it. If the ref is already zero, then fail the mapping. If successful,
	 * the caller will call io_put_bl() to drop the reference at the
	 * end. This may then safely free the buffer_list (and drop the pages)
	 * at that point, vm_insert_pages() would've already grabbed the
	 * necessary vma references.
	 */
	rcu_read_lock();
	bl = xa_load(&ctx->io_bl_xa, bgid);
	/* must be a mmap'able buffer ring and have pages */
	ret = false;
	if (bl && bl->is_mmap)
		ret = atomic_inc_not_zero(&bl->refs);
	rcu_read_unlock();

	if (ret)
		return bl;

	return ERR_PTR(-EINVAL);
}

int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct io_ring_ctx *ctx = file->private_data;
	loff_t pgoff = vma->vm_pgoff << PAGE_SHIFT;
	struct io_buffer_list *bl;
	int bgid, ret;

	bgid = (pgoff & ~IORING_OFF_MMAP_MASK) >> IORING_OFF_PBUF_SHIFT;
	bl = io_pbuf_get_bl(ctx, bgid);
	if (IS_ERR(bl))
		return PTR_ERR(bl);

	ret = io_uring_mmap_pages(ctx, vma, bl->buf_pages, bl->buf_nr_pages);
	io_put_bl(ctx, bl);
	return ret;
}
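/*
 * Sketch of the mmap offset decoded by io_pbuf_mmap() above (not part of
 * this file, with ring_fd and ring_size assumed from the application):
 * when a ring was registered with IOU_PBUF_RING_MMAP, userspace maps it
 * with the buffer group ID encoded in the file offset:
 *
 *	off_t off = IORING_OFF_PBUF_RING |
 *		    ((__u64) bgid << IORING_OFF_PBUF_SHIFT);
 *	struct io_uring_buf_ring *br = mmap(NULL, ring_size,
 *					    PROT_READ | PROT_WRITE,
 *					    MAP_SHARED | MAP_POPULATE,
 *					    ring_fd, off);
 *
 * io_pbuf_mmap() recovers bgid from that offset and maps bl->buf_pages
 * into the caller's address space.
 */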