// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/vmalloc.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "opdef.h"
#include "kbuf.h"
#include "memmap.h"

/* BIDs are addressed by a 16-bit field in a CQE */
#define MAX_BIDS_PER_BGID (1 << 16)

/* Mapped buffer ring, return io_uring_buf from head */
#define io_ring_head_to_buf(br, head, mask)	&(br)->bufs[(head) & (mask)]

struct io_provide_buf {
	struct file			*file;
	__u64				addr;
	__u32				len;
	__u32				bgid;
	__u32				nbufs;
	__u16				bid;
};
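
/*
 * Commit "len" consumed bytes for an incremental (IOBL_INC) buffer ring.
 * Buffers are drained front to back from the current head: a buffer that
 * is only partially used has its addr/len advanced in place and remains
 * the head entry, while fully drained buffers bump the ring head. Returns
 * true if every touched buffer was fully consumed, false if the head
 * buffer still has space left for future transfers.
 */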
static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len)
{
	while (len) {
		struct io_uring_buf *buf;
		u32 this_len;

		buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
		this_len = min_t(int, len, buf->len);
		buf->len -= this_len;
		if (buf->len) {
			buf->addr += this_len;
			return false;
		}
		bl->head++;
		len -= this_len;
	}
	return true;
}

bool io_kbuf_commit(struct io_kiocb *req,
		    struct io_buffer_list *bl, int len, int nr)
{
	if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
		return true;

	req->flags &= ~REQ_F_BUFFERS_COMMIT;

	if (unlikely(len < 0))
		return true;
	if (bl->flags & IOBL_INC)
		return io_kbuf_inc_commit(bl, len);
	bl->head += nr;
	return true;
}

static inline struct io_buffer_list *io_buffer_get_list(struct io_ring_ctx *ctx,
							unsigned int bgid)
{
	lockdep_assert_held(&ctx->uring_lock);

	return xa_load(&ctx->io_bl_xa, bgid);
}

static int io_buffer_add_list(struct io_ring_ctx *ctx,
			      struct io_buffer_list *bl, unsigned int bgid)
{
	/*
	 * Store buffer group ID and finally mark the list as visible.
	 * The normal lookup doesn't care about the visibility as we're
	 * always under the ->uring_lock, but lookups from mmap do.
	 */
	bl->bgid = bgid;
	guard(mutex)(&ctx->mmap_lock);
	return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
}

void io_kbuf_drop_legacy(struct io_kiocb *req)
{
	if (WARN_ON_ONCE(!(req->flags & REQ_F_BUFFER_SELECTED)))
		return;
	req->flags &= ~REQ_F_BUFFER_SELECTED;
	kfree(req->kbuf);
	req->kbuf = NULL;
}

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	struct io_buffer *buf;

	io_ring_submit_lock(ctx, issue_flags);

	buf = req->kbuf;
	bl = io_buffer_get_list(ctx, buf->bgid);
	list_add(&buf->list, &bl->buf_list);
	bl->nbufs++;
	req->flags &= ~REQ_F_BUFFER_SELECTED;

	io_ring_submit_unlock(ctx, issue_flags);
	return true;
}

static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,
					      struct io_buffer_list *bl)
{
	if (!list_empty(&bl->buf_list)) {
		struct io_buffer *kbuf;

		kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&kbuf->list);
		bl->nbufs--;
		if (*len == 0 || *len > kbuf->len)
			*len = kbuf->len;
		if (list_empty(&bl->buf_list))
			req->flags |= REQ_F_BL_EMPTY;
		req->flags |= REQ_F_BUFFER_SELECTED;
		req->kbuf = kbuf;
		req->buf_index = kbuf->bid;
		return u64_to_user_ptr(kbuf->addr);
	}
	return NULL;
}

static int io_provided_buffers_select(struct io_kiocb *req, size_t *len,
				      struct io_buffer_list *bl,
				      struct iovec *iov)
{
	void __user *buf;

	buf = io_provided_buffer_select(req, len, bl);
	if (unlikely(!buf))
		return -ENOBUFS;

	iov[0].iov_base = buf;
	iov[0].iov_len = *len;
	return 1;
}
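
/*
 * Pick the next buffer from a mapped buffer ring. The application
 * publishes new buffers by advancing the ring tail (a store-release on
 * the user side, paired with the smp_load_acquire() of the tail below);
 * the kernel consumes from the head, which is private to the kernel and
 * only exposed on request via IORING_REGISTER_PBUF_STATUS.
 */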
static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,
					  struct io_buffer_list *bl,
					  unsigned int issue_flags)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	__u16 tail, head = bl->head;
	struct io_uring_buf *buf;
	void __user *ret;

	tail = smp_load_acquire(&br->tail);
	if (unlikely(tail == head))
		return NULL;

	if (head + 1 == tail)
		req->flags |= REQ_F_BL_EMPTY;

	buf = io_ring_head_to_buf(br, head, bl->mask);
	if (*len == 0 || *len > buf->len)
		*len = buf->len;
	req->flags |= REQ_F_BUFFER_RING | REQ_F_BUFFERS_COMMIT;
	req->buf_list = bl;
	req->buf_index = buf->bid;
	ret = u64_to_user_ptr(buf->addr);

	if (issue_flags & IO_URING_F_UNLOCKED || !io_file_can_poll(req)) {
		/*
		 * If we came in unlocked, we have no choice but to consume the
		 * buffer here, otherwise nothing ensures that the buffer won't
		 * get used by others. This does mean it'll be pinned until the
		 * IO completes, as coming in unlocked means we're being called
		 * from io-wq context and there may be further retries in async
		 * hybrid mode. For the locked case, the caller must call commit
		 * when the transfer completes (or if we get -EAGAIN and must
		 * poll or retry).
		 */
		io_kbuf_commit(req, bl, *len, 1);
		req->buf_list = NULL;
	}
	return ret;
}

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned buf_group, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	void __user *ret = NULL;

	io_ring_submit_lock(req->ctx, issue_flags);

	bl = io_buffer_get_list(ctx, buf_group);
	if (likely(bl)) {
		if (bl->flags & IOBL_BUF_RING)
			ret = io_ring_buffer_select(req, len, bl, issue_flags);
		else
			ret = io_provided_buffer_select(req, len, bl);
	}
	io_ring_submit_unlock(req->ctx, issue_flags);
	return ret;
}

/* cap it at a reasonable 256, will be one page even for 4K */
#define PEEK_MAX_IMPORT		256
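
/*
 * Map available ring buffers into the iovec array without committing
 * them. With KBUF_MODE_EXPAND a larger array may be allocated if more
 * buffers are needed; if a maximum transfer length is given, the number
 * of buffers needed is estimated from the size of the first buffer and
 * capped at PEEK_MAX_IMPORT entries.
 */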
static int io_ring_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
				struct io_buffer_list *bl)
{
	struct io_uring_buf_ring *br = bl->buf_ring;
	struct iovec *iov = arg->iovs;
	int nr_iovs = arg->nr_iovs;
	__u16 nr_avail, tail, head;
	struct io_uring_buf *buf;

	tail = smp_load_acquire(&br->tail);
	head = bl->head;
	nr_avail = min_t(__u16, tail - head, UIO_MAXIOV);
	if (unlikely(!nr_avail))
		return -ENOBUFS;

	buf = io_ring_head_to_buf(br, head, bl->mask);
	if (arg->max_len) {
		u32 len = READ_ONCE(buf->len);
		size_t needed;

		if (unlikely(!len))
			return -ENOBUFS;
		needed = (arg->max_len + len - 1) / len;
		needed = min_not_zero(needed, (size_t) PEEK_MAX_IMPORT);
		if (nr_avail > needed)
			nr_avail = needed;
	}

	/*
	 * only alloc a bigger array if we know we have data to map, e.g. not
	 * a speculative peek operation.
	 */
	if (arg->mode & KBUF_MODE_EXPAND && nr_avail > nr_iovs && arg->max_len) {
		iov = kmalloc_array(nr_avail, sizeof(struct iovec), GFP_KERNEL);
		if (unlikely(!iov))
			return -ENOMEM;
		if (arg->mode & KBUF_MODE_FREE)
			kfree(arg->iovs);
		arg->iovs = iov;
		nr_iovs = nr_avail;
	} else if (nr_avail < nr_iovs) {
		nr_iovs = nr_avail;
	}

	/* set it to max, if not set, so we can use it unconditionally */
	if (!arg->max_len)
		arg->max_len = INT_MAX;

	req->buf_index = buf->bid;
	do {
		u32 len = buf->len;

		/* truncate end piece, if needed, for non-partial buffers */
		if (len > arg->max_len) {
			len = arg->max_len;
			if (!(bl->flags & IOBL_INC))
				buf->len = len;
		}

		iov->iov_base = u64_to_user_ptr(buf->addr);
		iov->iov_len = len;
		iov++;

		arg->out_len += len;
		arg->max_len -= len;
		if (!arg->max_len)
			break;

		buf = io_ring_head_to_buf(br, ++head, bl->mask);
	} while (--nr_iovs);

	if (head == tail)
		req->flags |= REQ_F_BL_EMPTY;

	req->flags |= REQ_F_BUFFER_RING;
	req->buf_list = bl;
	return iov - arg->iovs;
}

int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret = -ENOENT;

	io_ring_submit_lock(ctx, issue_flags);
	bl = io_buffer_get_list(ctx, arg->buf_group);
	if (unlikely(!bl))
		goto out_unlock;

	if (bl->flags & IOBL_BUF_RING) {
		ret = io_ring_buffers_peek(req, arg, bl);
		/*
		 * Don't recycle these buffers if we need to go through poll.
		 * Nobody else can use them anyway, and holding on to provided
		 * buffers for a send/write operation would happen on the app
		 * side anyway with normal buffers. Besides, we already
		 * committed them, they cannot be put back in the queue.
		 */
		if (ret > 0) {
			req->flags |= REQ_F_BUFFERS_COMMIT | REQ_F_BL_NO_RECYCLE;
			io_kbuf_commit(req, bl, arg->out_len, ret);
		}
	} else {
		ret = io_provided_buffers_select(req, &arg->out_len, bl, arg->iovs);
	}
out_unlock:
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	bl = io_buffer_get_list(ctx, arg->buf_group);
	if (unlikely(!bl))
		return -ENOENT;

	if (bl->flags & IOBL_BUF_RING) {
		ret = io_ring_buffers_peek(req, arg, bl);
		if (ret > 0)
			req->flags |= REQ_F_BUFFERS_COMMIT;
		return ret;
	}

	/* don't support multiple buffer selections for legacy */
	return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs);
}

static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
{
	struct io_buffer_list *bl = req->buf_list;
	bool ret = true;

	if (bl)
		ret = io_kbuf_commit(req, bl, len, nr);

	req->flags &= ~REQ_F_BUFFER_RING;
	return ret;
}
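
/*
 * Release the buffer(s) used by a request and build the CQE flags for it:
 * IORING_CQE_F_BUFFER plus the buffer ID in the upper bits tells the
 * application which buffer completed, and IORING_CQE_F_BUF_MORE signals
 * that an incrementally consumed buffer still has space left and will be
 * used for further transfers.
 */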
unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs)
{
	unsigned int ret;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);

	if (unlikely(!(req->flags & REQ_F_BUFFER_RING))) {
		io_kbuf_drop_legacy(req);
		return ret;
	}

	if (!__io_put_kbuf_ring(req, len, nbufs))
		ret |= IORING_CQE_F_BUF_MORE;
	return ret;
}

static int io_remove_buffers_legacy(struct io_ring_ctx *ctx,
				    struct io_buffer_list *bl,
				    unsigned long nbufs)
{
	unsigned long i = 0;
	struct io_buffer *nxt;

	/* protects io_buffers_cache */
	lockdep_assert_held(&ctx->uring_lock);
	WARN_ON_ONCE(bl->flags & IOBL_BUF_RING);

	for (i = 0; i < nbufs && !list_empty(&bl->buf_list); i++) {
		nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
		list_del(&nxt->list);
		bl->nbufs--;
		kfree(nxt);
		cond_resched();
	}
	return i;
}

static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	if (bl->flags & IOBL_BUF_RING)
		io_free_region(ctx, &bl->region);
	else
		io_remove_buffers_legacy(ctx, bl, -1U);

	kfree(bl);
}

void io_destroy_buffers(struct io_ring_ctx *ctx)
{
	struct io_buffer_list *bl;

	while (1) {
		unsigned long index = 0;

		scoped_guard(mutex, &ctx->mmap_lock) {
			bl = xa_find(&ctx->io_bl_xa, &index, ULONG_MAX, XA_PRESENT);
			if (bl)
				xa_erase(&ctx->io_bl_xa, bl->bgid);
		}
		if (!bl)
			break;
		io_put_bl(ctx, bl);
	}
}

static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
{
	scoped_guard(mutex, &ctx->mmap_lock)
		WARN_ON_ONCE(xa_erase(&ctx->io_bl_xa, bl->bgid) != bl);
	io_put_bl(ctx, bl);
}

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
	    sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->nbufs = tmp;
	p->bgid = READ_ONCE(sqe->buf_group);
	return 0;
}

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	unsigned long size, tmp_check;
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	u64 tmp;

	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	tmp = READ_ONCE(sqe->fd);
	if (!tmp || tmp > MAX_BIDS_PER_BGID)
		return -E2BIG;
	p->nbufs = tmp;
	p->addr = READ_ONCE(sqe->addr);
	p->len = READ_ONCE(sqe->len);
	if (!p->len)
		return -EINVAL;

	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
			       &size))
		return -EOVERFLOW;
	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
		return -EOVERFLOW;
	if (!access_ok(u64_to_user_ptr(p->addr), size))
		return -EFAULT;

	p->bgid = READ_ONCE(sqe->buf_group);
	tmp = READ_ONCE(sqe->off);
	if (tmp > USHRT_MAX)
		return -E2BIG;
	if (tmp + p->nbufs > MAX_BIDS_PER_BGID)
		return -EINVAL;
	p->bid = tmp;
	return 0;
}
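
/*
 * Add nbufs legacy provided buffers to a buffer list. The buffers cover
 * one contiguous user memory range starting at pbuf->addr, with
 * consecutive buffer IDs starting at pbuf->bid. Returns 0 if at least one
 * buffer was added, even if the full count could not be allocated.
 */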
static int io_add_buffers(struct io_ring_ctx *ctx, struct io_provide_buf *pbuf,
			  struct io_buffer_list *bl)
{
	struct io_buffer *buf;
	u64 addr = pbuf->addr;
	int ret = -ENOMEM, i, bid = pbuf->bid;

	for (i = 0; i < pbuf->nbufs; i++) {
		/*
		 * Nonsensical to have more buffers in a list than a 16-bit
		 * bid can address, as the application then has no way of
		 * knowing which duplicate bid refers to what buffer.
		 */
		if (bl->nbufs == USHRT_MAX) {
			ret = -EOVERFLOW;
			break;
		}
		buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
		if (!buf)
			break;

		list_add_tail(&buf->list, &bl->buf_list);
		bl->nbufs++;
		buf->addr = addr;
		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
		buf->bid = bid;
		buf->bgid = pbuf->bgid;
		addr += pbuf->len;
		bid++;
		cond_resched();
	}

	return i ? 0 : ret;
}

static int __io_manage_buffers_legacy(struct io_kiocb *req,
				      struct io_buffer_list *bl)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	int ret;

	if (!bl) {
		if (req->opcode != IORING_OP_PROVIDE_BUFFERS)
			return -ENOENT;
		bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
		if (!bl)
			return -ENOMEM;

		INIT_LIST_HEAD(&bl->buf_list);
		ret = io_buffer_add_list(req->ctx, bl, p->bgid);
		if (ret) {
			kfree(bl);
			return ret;
		}
	}
	/* can't use provide/remove buffers command on mapped buffers */
	if (bl->flags & IOBL_BUF_RING)
		return -EINVAL;
	if (req->opcode == IORING_OP_PROVIDE_BUFFERS)
		return io_add_buffers(req->ctx, p, bl);
	return io_remove_buffers_legacy(req->ctx, bl, p->nbufs);
}

int io_manage_buffers_legacy(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_provide_buf *p = io_kiocb_to_cmd(req, struct io_provide_buf);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_buffer_list *bl;
	int ret;

	io_ring_submit_lock(ctx, issue_flags);
	bl = io_buffer_get_list(ctx, p->bgid);
	ret = __io_manage_buffers_legacy(req, bl);
	io_ring_submit_unlock(ctx, issue_flags);

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}
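
/*
 * Register a mapped buffer ring (IORING_REGISTER_PBUF_RING). The ring
 * memory either comes from the application (reg.ring_addr) or, with
 * IOU_PBUF_RING_MMAP, is allocated by the kernel and later mmap'ed by
 * the application at an offset derived from the buffer group ID.
 */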
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;
	struct io_uring_region_desc rd;
	struct io_uring_buf_ring *br;
	unsigned long mmap_offset;
	unsigned long ring_size;
	int ret;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (!mem_is_zero(reg.resv, sizeof(reg.resv)))
		return -EINVAL;
	if (reg.flags & ~(IOU_PBUF_RING_MMAP | IOU_PBUF_RING_INC))
		return -EINVAL;
	if (!is_power_of_2(reg.ring_entries))
		return -EINVAL;
	/* cannot disambiguate full vs empty due to head/tail size */
	if (reg.ring_entries >= 65536)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (bl) {
		/* if mapped buffer ring OR classic exists, don't allow */
		if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list))
			return -EEXIST;
		io_destroy_bl(ctx, bl);
	}

	bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
	if (!bl)
		return -ENOMEM;

	mmap_offset = (unsigned long)reg.bgid << IORING_OFF_PBUF_SHIFT;
	ring_size = flex_array_size(br, bufs, reg.ring_entries);

	memset(&rd, 0, sizeof(rd));
	rd.size = PAGE_ALIGN(ring_size);
	if (!(reg.flags & IOU_PBUF_RING_MMAP)) {
		rd.user_addr = reg.ring_addr;
		rd.flags |= IORING_MEM_REGION_TYPE_USER;
	}
	ret = io_create_region_mmap_safe(ctx, &bl->region, &rd, mmap_offset);
	if (ret)
		goto fail;
	br = io_region_get_ptr(&bl->region);

#ifdef SHM_COLOUR
	/*
	 * On platforms that have specific aliasing requirements, SHM_COLOUR
	 * is set and we must guarantee that the kernel and user side align
	 * nicely. We cannot do that if IOU_PBUF_RING_MMAP isn't set and
	 * the application mmap's the provided ring buffer. Fail the request
	 * if we, by chance, don't end up with aligned addresses. The app
	 * should use IOU_PBUF_RING_MMAP instead, and liburing will handle
	 * this transparently.
	 */
	if (!(reg.flags & IOU_PBUF_RING_MMAP) &&
	    ((reg.ring_addr | (unsigned long)br) & (SHM_COLOUR - 1))) {
		ret = -EINVAL;
		goto fail;
	}
#endif

	bl->nr_entries = reg.ring_entries;
	bl->mask = reg.ring_entries - 1;
	bl->flags |= IOBL_BUF_RING;
	bl->buf_ring = br;
	if (reg.flags & IOU_PBUF_RING_INC)
		bl->flags |= IOBL_INC;
	io_buffer_add_list(ctx, bl, reg.bgid);
	return 0;
fail:
	io_free_region(ctx, &bl->region);
	kfree(bl);
	return ret;
}

int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_reg reg;
	struct io_buffer_list *bl;

	lockdep_assert_held(&ctx->uring_lock);

	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;
	if (!mem_is_zero(reg.resv, sizeof(reg.resv)) || reg.flags)
		return -EINVAL;

	bl = io_buffer_get_list(ctx, reg.bgid);
	if (!bl)
		return -ENOENT;
	if (!(bl->flags & IOBL_BUF_RING))
		return -EINVAL;

	scoped_guard(mutex, &ctx->mmap_lock)
		xa_erase(&ctx->io_bl_xa, bl->bgid);

	io_put_bl(ctx, bl);
	return 0;
}

int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg)
{
	struct io_uring_buf_status buf_status;
	struct io_buffer_list *bl;

	if (copy_from_user(&buf_status, arg, sizeof(buf_status)))
		return -EFAULT;
	if (!mem_is_zero(buf_status.resv, sizeof(buf_status.resv)))
		return -EINVAL;

	bl = io_buffer_get_list(ctx, buf_status.buf_group);
	if (!bl)
		return -ENOENT;
	if (!(bl->flags & IOBL_BUF_RING))
		return -EINVAL;

	buf_status.head = bl->head;
	if (copy_to_user(arg, &buf_status, sizeof(buf_status)))
		return -EFAULT;

	return 0;
}

struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
					    unsigned int bgid)
{
	struct io_buffer_list *bl;

	lockdep_assert_held(&ctx->mmap_lock);

	bl = xa_load(&ctx->io_bl_xa, bgid);
	if (!bl || !(bl->flags & IOBL_BUF_RING))
		return NULL;
	return &bl->region;
}