Lines matching refs:bl (occurrences of the struct io_buffer_list pointer bl in io_uring's provided-buffer code)
35 static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len) in io_kbuf_inc_commit() argument
41 buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask); in io_kbuf_inc_commit()
52 bl->head++; in io_kbuf_inc_commit()
59 struct io_buffer_list *bl, int len, int nr) in io_kbuf_commit() argument
68 if (bl->flags & IOBL_INC) in io_kbuf_commit()
69 return io_kbuf_inc_commit(bl, len); in io_kbuf_commit()
70 bl->head += nr; in io_kbuf_commit()
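
The two commit paths above move bl->head differently: io_kbuf_commit advances it by the number of whole buffers consumed, while io_kbuf_inc_commit, taken when IOBL_INC is set (the incremental-consumption mode selected at ring registration), walks the current buffer byte by byte and only bumps the head once that buffer is exhausted. A minimal sketch of the idea, using simplified stand-in types rather than the kernel's structures:

    struct buf { unsigned long long addr; unsigned int len; };
    struct ring { struct buf *bufs; unsigned short head; unsigned short mask; };

    /* Whole-buffer commit: 'nr' buffers were consumed outright. */
    static void commit_full(struct ring *r, int nr)
    {
            r->head += nr;
    }

    /* Incremental commit: consume 'len' bytes, advancing the head only when a
     * buffer is exhausted, so one large buffer can back many small transfers. */
    static void commit_inc(struct ring *r, unsigned int len)
    {
            while (len) {
                    struct buf *b = &r->bufs[r->head & r->mask];
                    unsigned int this_len = len < b->len ? len : b->len;

                    if (!this_len)          /* guard against a zero-length buffer */
                            break;
                    b->addr += this_len;    /* a partially used buffer resumes here */
                    b->len -= this_len;
                    len -= this_len;
                    if (!b->len)
                            r->head++;      /* buffer fully consumed, move on */
            }
    }
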
83 struct io_buffer_list *bl, unsigned int bgid) in io_buffer_add_list() argument
90 bl->bgid = bgid; in io_buffer_add_list()
92 return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL)); in io_buffer_add_list()
107 struct io_buffer_list *bl; in io_kbuf_recycle_legacy() local
113 bl = io_buffer_get_list(ctx, buf->bgid); in io_kbuf_recycle_legacy()
114 list_add(&buf->list, &bl->buf_list); in io_kbuf_recycle_legacy()
115 bl->nbufs++; in io_kbuf_recycle_legacy()
123 struct io_buffer_list *bl) in io_provided_buffer_select() argument
125 if (!list_empty(&bl->buf_list)) { in io_provided_buffer_select()
128 kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list); in io_provided_buffer_select()
130 bl->nbufs--; in io_provided_buffer_select()
133 if (list_empty(&bl->buf_list)) in io_provided_buffer_select()
144 struct io_buffer_list *bl, in io_provided_buffers_select() argument
149 buf = io_provided_buffer_select(req, len, bl); in io_provided_buffers_select()
180 struct io_buffer_list *bl, in io_ring_buffer_select() argument
183 struct io_uring_buf_ring *br = bl->buf_ring; in io_ring_buffer_select()
184 __u16 tail, head = bl->head; in io_ring_buffer_select()
196 buf = io_ring_head_to_buf(br, head, bl->mask); in io_ring_buffer_select()
202 sel.buf_list = bl; in io_ring_buffer_select()
217 struct io_buffer_list *bl; in io_buffer_select() local
221 bl = io_buffer_get_list(ctx, buf_group); in io_buffer_select()
222 if (likely(bl)) { in io_buffer_select()
223 if (bl->flags & IOBL_BUF_RING) in io_buffer_select()
224 sel = io_ring_buffer_select(req, len, bl, issue_flags); in io_buffer_select()
226 sel.addr = io_provided_buffer_select(req, len, bl); in io_buffer_select()
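
io_buffer_select is the kernel entry point for IOSQE_BUFFER_SELECT: the request names a buffer group, and the kernel hands back a buffer from either the registered ring (IOBL_BUF_RING) or the legacy list. The userspace half looks roughly like the following liburing sketch (the group id 7 and the bare recv are just example choices):

    #include <errno.h>
    #include <liburing.h>

    /* Queue a recv and let the kernel pick a buffer from group 7. */
    static int queue_recv_select(struct io_uring *ring, int sockfd)
    {
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

            if (!sqe)
                    return -EAGAIN;
            io_uring_prep_recv(sqe, sockfd, NULL, 0, 0);    /* no buffer supplied */
            sqe->flags |= IOSQE_BUFFER_SELECT;
            sqe->buf_group = 7;
            return io_uring_submit(ring);
    }

    /* On completion, the selected buffer id rides in cqe->flags. */
    static unsigned int completed_buffer_id(const struct io_uring_cqe *cqe)
    {
            if (cqe->res > 0 && (cqe->flags & IORING_CQE_F_BUFFER))
                    return cqe->flags >> IORING_CQE_BUFFER_SHIFT;
            return (unsigned int)-1;        /* no buffer was consumed */
    }
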
236 struct io_buffer_list *bl) in io_ring_buffers_peek() argument
238 struct io_uring_buf_ring *br = bl->buf_ring; in io_ring_buffers_peek()
245 head = bl->head; in io_ring_buffers_peek()
250 buf = io_ring_head_to_buf(br, head, bl->mask); in io_ring_buffers_peek()
290 if (!(bl->flags & IOBL_INC)) { in io_ring_buffers_peek()
307 buf = io_ring_head_to_buf(br, ++head, bl->mask); in io_ring_buffers_peek()
356 struct io_buffer_list *bl; in io_buffers_peek() local
361 bl = io_buffer_get_list(ctx, arg->buf_group); in io_buffers_peek()
362 if (unlikely(!bl)) in io_buffers_peek()
365 if (bl->flags & IOBL_BUF_RING) { in io_buffers_peek()
366 ret = io_ring_buffers_peek(req, arg, bl); in io_buffers_peek()
369 sel->buf_list = bl; in io_buffers_peek()
375 return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs); in io_buffers_peek()
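
Unlike the single-buffer select above, io_ring_buffers_peek and io_buffers_peek pull several ring buffers in one pass; this is the path behind bundled sends and receives. A hedged sketch, assuming a kernel and liburing recent enough to expose IORING_RECVSEND_BUNDLE (the group id is again arbitrary):

    #include <liburing.h>

    /* Multishot receive that may complete with a bundle of buffers at once. */
    static void queue_recv_bundle(struct io_uring *ring, int sockfd, unsigned short bgid)
    {
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

            io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
            sqe->flags |= IOSQE_BUFFER_SELECT;
            sqe->buf_group = bgid;
            sqe->ioprio |= IORING_RECVSEND_BUNDLE;  /* pack as many buffers as the data fills */
    }

On a bundled completion, cqe->res reports the total bytes transferred and the buffer id in cqe->flags names the first buffer of the run, with the data continuing into the following ring entries.
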
379 struct io_buffer_list *bl, int len, int nr) in __io_put_kbuf_ring() argument
383 if (bl) in __io_put_kbuf_ring()
384 ret = io_kbuf_commit(req, bl, len, nr); in __io_put_kbuf_ring()
390 unsigned int __io_put_kbufs(struct io_kiocb *req, struct io_buffer_list *bl, in __io_put_kbufs() argument
402 if (!__io_put_kbuf_ring(req, bl, len, nbufs)) in __io_put_kbufs()
408 struct io_buffer_list *bl, in io_remove_buffers_legacy() argument
416 WARN_ON_ONCE(bl->flags & IOBL_BUF_RING); in io_remove_buffers_legacy()
418 for (i = 0; i < nbufs && !list_empty(&bl->buf_list); i++) { in io_remove_buffers_legacy()
419 nxt = list_first_entry(&bl->buf_list, struct io_buffer, list); in io_remove_buffers_legacy()
421 bl->nbufs--; in io_remove_buffers_legacy()
428 static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl) in io_put_bl() argument
430 if (bl->flags & IOBL_BUF_RING) in io_put_bl()
431 io_free_region(ctx->user, &bl->region); in io_put_bl()
433 io_remove_buffers_legacy(ctx, bl, -1U); in io_put_bl()
435 kfree(bl); in io_put_bl()
440 struct io_buffer_list *bl; in io_destroy_buffers() local
446 bl = xa_find(&ctx->io_bl_xa, &index, ULONG_MAX, XA_PRESENT); in io_destroy_buffers()
447 if (bl) in io_destroy_buffers()
448 xa_erase(&ctx->io_bl_xa, bl->bgid); in io_destroy_buffers()
450 if (!bl) in io_destroy_buffers()
452 io_put_bl(ctx, bl); in io_destroy_buffers()
456 static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl) in io_destroy_bl() argument
459 WARN_ON_ONCE(xa_erase(&ctx->io_bl_xa, bl->bgid) != bl); in io_destroy_bl()
460 io_put_bl(ctx, bl); in io_destroy_bl()
519 struct io_buffer_list *bl) in io_add_buffers() argument
531 if (bl->nbufs == USHRT_MAX) { in io_add_buffers()
539 list_add_tail(&buf->list, &bl->buf_list); in io_add_buffers()
540 bl->nbufs++; in io_add_buffers()
554 struct io_buffer_list *bl) in __io_manage_buffers_legacy() argument
559 if (!bl) { in __io_manage_buffers_legacy()
562 bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT); in __io_manage_buffers_legacy()
563 if (!bl) in __io_manage_buffers_legacy()
566 INIT_LIST_HEAD(&bl->buf_list); in __io_manage_buffers_legacy()
567 ret = io_buffer_add_list(req->ctx, bl, p->bgid); in __io_manage_buffers_legacy()
569 kfree(bl); in __io_manage_buffers_legacy()
574 if (bl->flags & IOBL_BUF_RING) in __io_manage_buffers_legacy()
577 return io_add_buffers(req->ctx, p, bl); in __io_manage_buffers_legacy()
578 return io_remove_buffers_legacy(req->ctx, bl, p->nbufs); in __io_manage_buffers_legacy()
585 struct io_buffer_list *bl; in io_manage_buffers_legacy() local
589 bl = io_buffer_get_list(ctx, p->bgid); in io_manage_buffers_legacy()
590 ret = __io_manage_buffers_legacy(req, bl); in io_manage_buffers_legacy()
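
io_manage_buffers_legacy and its helpers (io_add_buffers, io_remove_buffers_legacy) implement the classic IORING_OP_PROVIDE_BUFFERS / IORING_OP_REMOVE_BUFFERS opcodes, where each buffer is a separately allocated struct io_buffer on bl->buf_list, capped at USHRT_MAX per group as the nbufs check above shows. From userspace, assuming liburing, that path looks like this (group id and sizes are example values):

    #include <liburing.h>

    #define NR_BUFS  64
    #define BUF_SIZE 4096
    #define BGID     7      /* example group id */

    /* Hand NR_BUFS equally sized buffers, carved out of 'base', to group BGID. */
    static int provide_buffers(struct io_uring *ring, void *base)
    {
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
            struct io_uring_cqe *cqe;
            int ret;

            io_uring_prep_provide_buffers(sqe, base, BUF_SIZE, NR_BUFS, BGID, 0);
            io_uring_submit(ring);
            ret = io_uring_wait_cqe(ring, &cqe);
            if (ret == 0) {
                    ret = cqe->res;         /* negative on error */
                    io_uring_cqe_seen(ring, cqe);
            }
            return ret;
    }

    /* Pull any remaining buffers back out of the group. */
    static void remove_buffers(struct io_uring *ring)
    {
            struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

            io_uring_prep_remove_buffers(sqe, NR_BUFS, BGID);
            io_uring_submit(ring);
    }
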
602 struct io_buffer_list *bl; in io_register_pbuf_ring() local
623 bl = io_buffer_get_list(ctx, reg.bgid); in io_register_pbuf_ring()
624 if (bl) { in io_register_pbuf_ring()
626 if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list)) in io_register_pbuf_ring()
628 io_destroy_bl(ctx, bl); in io_register_pbuf_ring()
631 bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT); in io_register_pbuf_ring()
632 if (!bl) in io_register_pbuf_ring()
644 ret = io_create_region(ctx, &bl->region, &rd, mmap_offset); in io_register_pbuf_ring()
647 br = io_region_get_ptr(&bl->region); in io_register_pbuf_ring()
666 bl->nr_entries = reg.ring_entries; in io_register_pbuf_ring()
667 bl->mask = reg.ring_entries - 1; in io_register_pbuf_ring()
668 bl->flags |= IOBL_BUF_RING; in io_register_pbuf_ring()
669 bl->buf_ring = br; in io_register_pbuf_ring()
671 bl->flags |= IOBL_INC; in io_register_pbuf_ring()
672 io_buffer_add_list(ctx, bl, reg.bgid); in io_register_pbuf_ring()
675 io_free_region(ctx->user, &bl->region); in io_register_pbuf_ring()
676 kfree(bl); in io_register_pbuf_ring()
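
io_register_pbuf_ring is the IORING_REGISTER_PBUF_RING handler: it refuses to reuse a group that already has a ring or legacy buffers, creates the ring region, and publishes the io_buffer_list with io_buffer_add_list. From userspace this is usually driven through liburing's helpers; a sketch, with the entry count (a power of two) and group id chosen arbitrarily:

    #include <liburing.h>

    #define ENTRIES  8
    #define BUF_SIZE 4096
    #define BGID     7      /* example group id */

    /* Register a buffer ring for group BGID and publish ENTRIES buffers from 'bufs'. */
    static struct io_uring_buf_ring *setup_ring_buffers(struct io_uring *ring, char *bufs)
    {
            struct io_uring_buf_ring *br;
            int i, err;

            br = io_uring_setup_buf_ring(ring, ENTRIES, BGID, 0, &err);
            if (!br)
                    return NULL;

            /* Queue every buffer, then make them visible with a single tail bump. */
            for (i = 0; i < ENTRIES; i++)
                    io_uring_buf_ring_add(br, bufs + i * BUF_SIZE, BUF_SIZE, i,
                                          io_uring_buf_ring_mask(ENTRIES), i);
            io_uring_buf_ring_advance(br, ENTRIES);
            return br;
    }

The matching teardown, io_uring_free_buf_ring(ring, br, ENTRIES, BGID), ends up in io_unregister_pbuf_ring below.
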
683 struct io_buffer_list *bl; in io_unregister_pbuf_ring() local
692 bl = io_buffer_get_list(ctx, reg.bgid); in io_unregister_pbuf_ring()
693 if (!bl) in io_unregister_pbuf_ring()
695 if (!(bl->flags & IOBL_BUF_RING)) in io_unregister_pbuf_ring()
699 xa_erase(&ctx->io_bl_xa, bl->bgid); in io_unregister_pbuf_ring()
701 io_put_bl(ctx, bl); in io_unregister_pbuf_ring()
708 struct io_buffer_list *bl; in io_register_pbuf_status() local
715 bl = io_buffer_get_list(ctx, buf_status.buf_group); in io_register_pbuf_status()
716 if (!bl) in io_register_pbuf_status()
718 if (!(bl->flags & IOBL_BUF_RING)) in io_register_pbuf_status()
721 buf_status.head = bl->head; in io_register_pbuf_status()
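
io_register_pbuf_status services IORING_REGISTER_PBUF_STATUS, which simply reports bl->head so userspace can see how far the kernel has consumed a given ring (recent liburing wraps this as io_uring_buf_ring_head). A raw sketch against the UAPI, assuming struct io_uring_buf_status is available in linux/io_uring.h:

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/io_uring.h>

    /* Fetch the current head for buffer group 'bgid' on io_uring fd 'ring_fd'. */
    static int pbuf_ring_head(int ring_fd, unsigned int bgid, unsigned int *head)
    {
            struct io_uring_buf_status st;

            memset(&st, 0, sizeof(st));
            st.buf_group = bgid;
            if (syscall(__NR_io_uring_register, ring_fd,
                        IORING_REGISTER_PBUF_STATUS, &st, 1) < 0)
                    return -1;
            *head = st.head;        /* entries below this index have been consumed */
            return 0;
    }
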
731 struct io_buffer_list *bl; in io_pbuf_get_region() local
735 bl = xa_load(&ctx->io_bl_xa, bgid); in io_pbuf_get_region()
736 if (!bl || !(bl->flags & IOBL_BUF_RING)) in io_pbuf_get_region()
738 return &bl->region; in io_pbuf_get_region()
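
io_pbuf_get_region backs mmap() of a buffer ring: when the ring was registered with IOU_PBUF_RING_MMAP the kernel allocates the memory (bl->region) and userspace maps it at a magic offset instead of passing ring_addr. A sketch of that path, assuming the UAPI flag and offset constants (IORING_OFF_PBUF_RING, IORING_OFF_PBUF_SHIFT) and liburing's register helper:

    #include <string.h>
    #include <sys/mman.h>
    #include <liburing.h>

    /* Have the kernel allocate the ring for 'bgid', then map it into our space. */
    static struct io_uring_buf_ring *map_kernel_buf_ring(struct io_uring *ring,
                                                         unsigned int entries,
                                                         unsigned short bgid)
    {
            struct io_uring_buf_reg reg;
            size_t size = entries * sizeof(struct io_uring_buf);
            void *p;

            memset(&reg, 0, sizeof(reg));
            reg.ring_entries = entries;     /* must be a power of two */
            reg.bgid = bgid;
            reg.flags = IOU_PBUF_RING_MMAP; /* no ring_addr: kernel owns the memory */
            if (io_uring_register_buf_ring(ring, &reg, 0) < 0)
                    return NULL;

            p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
                     ring->ring_fd,
                     IORING_OFF_PBUF_RING | ((__u64)bgid << IORING_OFF_PBUF_SHIFT));
            return p == MAP_FAILED ? NULL : p;
    }
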