Lines Matching full:bl (full-identifier matches for `bl`; a short sketch of the head/mask ring-indexing pattern these lines rely on follows the listing)

35 static bool io_kbuf_inc_commit(struct io_buffer_list *bl, int len)
41 buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
52 bl->head++;
59 struct io_buffer_list *bl, int len, int nr)
68 if (bl->flags & IOBL_INC)
69 return io_kbuf_inc_commit(bl, len);
70 bl->head += nr;
83 struct io_buffer_list *bl, unsigned int bgid)
90 bl->bgid = bgid;
92 return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
107 struct io_buffer_list *bl;
113 bl = io_buffer_get_list(ctx, buf->bgid);
114 list_add(&buf->list, &bl->buf_list);
115 bl->nbufs++;
123 struct io_buffer_list *bl)
125 if (!list_empty(&bl->buf_list)) {
128 kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
130 bl->nbufs--;
133 if (list_empty(&bl->buf_list))
144 struct io_buffer_list *bl,
149 buf = io_provided_buffer_select(req, len, bl);
180 struct io_buffer_list *bl,
183 struct io_uring_buf_ring *br = bl->buf_ring;
184 __u16 tail, head = bl->head;
196 buf = io_ring_head_to_buf(br, head, bl->mask);
202 sel.buf_list = bl;
217 struct io_buffer_list *bl;
221 bl = io_buffer_get_list(ctx, buf_group);
222 if (likely(bl)) {
223 if (bl->flags & IOBL_BUF_RING)
224 sel = io_ring_buffer_select(req, len, bl, issue_flags);
226 sel.addr = io_provided_buffer_select(req, len, bl);
236 struct io_buffer_list *bl)
238 struct io_uring_buf_ring *br = bl->buf_ring;
245 head = bl->head;
250 buf = io_ring_head_to_buf(br, head, bl->mask);
290 if (!(bl->flags & IOBL_INC)) {
307 buf = io_ring_head_to_buf(br, ++head, bl->mask);
356 struct io_buffer_list *bl;
361 bl = io_buffer_get_list(ctx, arg->buf_group);
362 if (unlikely(!bl))
365 if (bl->flags & IOBL_BUF_RING) {
366 ret = io_ring_buffers_peek(req, arg, bl);
369 sel->buf_list = bl;
375 return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs);
379 struct io_buffer_list *bl, int len, int nr)
383 if (bl)
384 ret = io_kbuf_commit(req, bl, len, nr);
390 unsigned int __io_put_kbufs(struct io_kiocb *req, struct io_buffer_list *bl,
402 if (!__io_put_kbuf_ring(req, bl, len, nbufs))
408 struct io_buffer_list *bl,
416 WARN_ON_ONCE(bl->flags & IOBL_BUF_RING);
418 for (i = 0; i < nbufs && !list_empty(&bl->buf_list); i++) {
419 nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
421 bl->nbufs--;
428 static void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
430 if (bl->flags & IOBL_BUF_RING)
431 io_free_region(ctx, &bl->region);
433 io_remove_buffers_legacy(ctx, bl, -1U);
435 kfree(bl);
440 struct io_buffer_list *bl;
446 bl = xa_find(&ctx->io_bl_xa, &index, ULONG_MAX, XA_PRESENT);
447 if (bl)
448 xa_erase(&ctx->io_bl_xa, bl->bgid);
450 if (!bl)
452 io_put_bl(ctx, bl);
456 static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
459 WARN_ON_ONCE(xa_erase(&ctx->io_bl_xa, bl->bgid) != bl);
460 io_put_bl(ctx, bl);
519 struct io_buffer_list *bl)
531 if (bl->nbufs == USHRT_MAX) {
539 list_add_tail(&buf->list, &bl->buf_list);
540 bl->nbufs++;
554 struct io_buffer_list *bl)
559 if (!bl) {
562 bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
563 if (!bl)
566 INIT_LIST_HEAD(&bl->buf_list);
567 ret = io_buffer_add_list(req->ctx, bl, p->bgid);
569 kfree(bl);
574 if (bl->flags & IOBL_BUF_RING)
577 return io_add_buffers(req->ctx, p, bl);
578 return io_remove_buffers_legacy(req->ctx, bl, p->nbufs);
585 struct io_buffer_list *bl;
589 bl = io_buffer_get_list(ctx, p->bgid);
590 ret = __io_manage_buffers_legacy(req, bl);
602 struct io_buffer_list *bl;
623 bl = io_buffer_get_list(ctx, reg.bgid);
624 if (bl) {
626 if (bl->flags & IOBL_BUF_RING || !list_empty(&bl->buf_list))
628 io_destroy_bl(ctx, bl);
631 bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
632 if (!bl)
644 ret = io_create_region_mmap_safe(ctx, &bl->region, &rd, mmap_offset);
647 br = io_region_get_ptr(&bl->region);
666 bl->nr_entries = reg.ring_entries;
667 bl->mask = reg.ring_entries - 1;
668 bl->flags |= IOBL_BUF_RING;
669 bl->buf_ring = br;
671 bl->flags |= IOBL_INC;
672 io_buffer_add_list(ctx, bl, reg.bgid);
675 io_free_region(ctx, &bl->region);
676 kfree(bl);
683 struct io_buffer_list *bl;
692 bl = io_buffer_get_list(ctx, reg.bgid);
693 if (!bl)
695 if (!(bl->flags & IOBL_BUF_RING))
699 xa_erase(&ctx->io_bl_xa, bl->bgid);
701 io_put_bl(ctx, bl);
708 struct io_buffer_list *bl;
715 bl = io_buffer_get_list(ctx, buf_status.buf_group);
716 if (!bl)
718 if (!(bl->flags & IOBL_BUF_RING))
721 buf_status.head = bl->head;
731 struct io_buffer_list *bl;
735 bl = xa_load(&ctx->io_bl_xa, bgid);
736 if (!bl || !(bl->flags & IOBL_BUF_RING))
738 return &bl->region;
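
The recurring pattern across these matches is a power-of-two buffer ring indexed by a free-running head counter: the mask is set from ring_entries - 1 (line 667), slots are looked up with io_ring_head_to_buf(br, head, bl->mask), and consumption is committed by advancing bl->head (lines 52, 69-70). The following is a minimal, user-space C sketch of that pattern only; the names and struct layout (buf_ring, buf_entry, ring_head_to_buf) are hypothetical stand-ins for illustration and are not the kernel's io_uring_buf_ring or io_buffer_list definitions.

/* Illustrative only: head & mask picks a ring slot when the ring size is a
 * power of two, and committing buffers is just advancing the head counter. */
#include <stdint.h>
#include <stdio.h>

struct buf_entry {
	uint64_t addr;
	uint32_t len;
	uint16_t bid;
};

struct buf_ring {
	uint16_t head;              /* free-running consumer index */
	uint16_t mask;              /* ring_entries - 1; ring_entries is a power of two */
	struct buf_entry bufs[8];
};

/* Map the ever-increasing head counter onto a ring slot. */
static struct buf_entry *ring_head_to_buf(struct buf_ring *br, uint16_t head)
{
	return &br->bufs[head & br->mask];
}

int main(void)
{
	struct buf_ring br = { .head = 0, .mask = 8 - 1 };

	for (int i = 0; i < 8; i++) {
		br.bufs[i].bid = (uint16_t)i;
		br.bufs[i].len = 4096;
	}

	/* "Select" two buffers, then commit them by advancing head. */
	struct buf_entry *first = ring_head_to_buf(&br, br.head);
	struct buf_entry *second = ring_head_to_buf(&br, (uint16_t)(br.head + 1));
	printf("picked bid %u and %u\n", (unsigned)first->bid, (unsigned)second->bid);

	br.head += 2;   /* commit: the consumed slots can now be refilled */
	return 0;
}

The commit step in the matches (bl->head += nr in the normal path, bl->head++ in the IOBL_INC branch) appears to be the same head advance shown at the end of main(): once the consumer moves head past a slot, that slot is again available to be refilled.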