// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

/* io_buffer_list->flags */
enum {
	/* ring mapped provided buffers */
	IOBL_BUF_RING	= 1,
	/* ring mapped provided buffers, but mmap'ed by application */
	IOBL_MMAP	= 2,
};

/*
 * One provided-buffer group, identified by ->bgid. Holds either a classic
 * linked list of struct io_buffer, or an application-visible buffer ring.
 */
struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
		/* for deferred freeing of the list (see ->rcu users) */
		struct rcu_head rcu;
	};
	/* buffer group ID this list belongs to */
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	/* consumer side head; wrapped into the ring via ->mask */
	__u16 head;
	__u16 mask;

	/* IOBL_* flags */
	__u16 flags;

	atomic_t refs;
};

/* a single classic (non-ring) provided buffer */
struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	/* buffer ID within the group */
	__u16 bid;
	/* buffer group ID */
	__u16 bgid;
};

/* buf_sel_arg->mode flags */
enum {
	/* can alloc a bigger vec */
	KBUF_MODE_EXPAND	= 1,
	/* if bigger vec allocated, free old one */
	KBUF_MODE_FREE		= 2,
};

/* arguments for multi-buffer selection (io_buffers_select/io_buffers_peek) */
struct buf_sel_arg {
	struct iovec *iovs;
	size_t out_len;
	size_t max_len;
	unsigned short nr_iovs;
	/* KBUF_MODE_* flags */
	unsigned short mode;
};

/* single/multi buffer selection for a request */
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags);
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg);
void io_destroy_buffers(struct io_ring_ctx *ctx);

/* IORING_OP_REMOVE_BUFFERS / IORING_OP_PROVIDE_BUFFERS handlers */
int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

/* (un)register a mapped buffer ring, and query its status */
int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);

void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl);
struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid);
int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma);

/*
 * Return a ring-provided buffer to the ring without consuming it. Returns
 * true if the request was holding a buffer list reference that was released.
 */
static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io, that case we should increment bl->head
	 * to monopolize the buffer.
	 */
	if (req->buf_list) {
		/* restore the group ID so a retry re-selects from it */
		req->buf_index = req->buf_list->bgid;
		req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT);
		return true;
	}
	return false;
}

/* true if the request wants buffer selection and hasn't picked one yet */
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

/*
 * Give a selected buffer back for reuse, dispatching on whether it came
 * from the legacy list or a mapped ring. Returns true if one was recycled.
 */
static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	/* partial IO already consumed the buffer; it must not be reused */
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}

/* Mapped buffer ring, return io_uring_buf from head */
125*2c8fa70bSJens Axboe #define io_ring_head_to_buf(br, head, mask) &(br)->bufs[(head) & (mask)] 126*2c8fa70bSJens Axboe 127ecd5c9b2SJens Axboe static inline void io_kbuf_commit(struct io_kiocb *req, 128ecd5c9b2SJens Axboe struct io_buffer_list *bl, int nr) 129ecd5c9b2SJens Axboe { 130ecd5c9b2SJens Axboe if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT))) 131ecd5c9b2SJens Axboe return; 132ecd5c9b2SJens Axboe bl->head += nr; 133ecd5c9b2SJens Axboe req->flags &= ~REQ_F_BUFFERS_COMMIT; 134ecd5c9b2SJens Axboe } 135ecd5c9b2SJens Axboe 13635c8711cSJens Axboe static inline void __io_put_kbuf_ring(struct io_kiocb *req, int nr) 1373b77495aSJens Axboe { 13835c8711cSJens Axboe struct io_buffer_list *bl = req->buf_list; 13935c8711cSJens Axboe 14035c8711cSJens Axboe if (bl) { 141ecd5c9b2SJens Axboe io_kbuf_commit(req, bl, nr); 14235c8711cSJens Axboe req->buf_index = bl->bgid; 14332f3c434SDylan Yudaken } 1443b77495aSJens Axboe req->flags &= ~REQ_F_BUFFER_RING; 1458435c6f3SJens Axboe } 1468435c6f3SJens Axboe 1478435c6f3SJens Axboe static inline void __io_put_kbuf_list(struct io_kiocb *req, 1488435c6f3SJens Axboe struct list_head *list) 1498435c6f3SJens Axboe { 1508435c6f3SJens Axboe if (req->flags & REQ_F_BUFFER_RING) { 15135c8711cSJens Axboe __io_put_kbuf_ring(req, 1); 1523b77495aSJens Axboe } else { 15332f3c434SDylan Yudaken req->buf_index = req->kbuf->bgid; 1543b77495aSJens Axboe list_add(&req->kbuf->list, list); 1553b77495aSJens Axboe req->flags &= ~REQ_F_BUFFER_SELECTED; 1563b77495aSJens Axboe } 1573b77495aSJens Axboe } 1583b77495aSJens Axboe 159bbbef3e9SMing Lei static inline void io_kbuf_drop(struct io_kiocb *req) 1603b77495aSJens Axboe { 1613b77495aSJens Axboe lockdep_assert_held(&req->ctx->completion_lock); 1623b77495aSJens Axboe 1633b77495aSJens Axboe if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))) 164bbbef3e9SMing Lei return; 1658435c6f3SJens Axboe 1668435c6f3SJens Axboe __io_put_kbuf_list(req, &req->ctx->io_buffers_comp); 1673b77495aSJens Axboe } 
1683b77495aSJens Axboe 16935c8711cSJens Axboe static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int nbufs, 1703b77495aSJens Axboe unsigned issue_flags) 1713b77495aSJens Axboe { 1728435c6f3SJens Axboe unsigned int ret; 1733b77495aSJens Axboe 1748435c6f3SJens Axboe if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED))) 1753b77495aSJens Axboe return 0; 1768435c6f3SJens Axboe 1778435c6f3SJens Axboe ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT); 1788435c6f3SJens Axboe if (req->flags & REQ_F_BUFFER_RING) 17935c8711cSJens Axboe __io_put_kbuf_ring(req, nbufs); 1808435c6f3SJens Axboe else 1818435c6f3SJens Axboe __io_put_kbuf(req, issue_flags); 1828435c6f3SJens Axboe return ret; 1833b77495aSJens Axboe } 18435c8711cSJens Axboe 18535c8711cSJens Axboe static inline unsigned int io_put_kbuf(struct io_kiocb *req, 18635c8711cSJens Axboe unsigned issue_flags) 18735c8711cSJens Axboe { 18835c8711cSJens Axboe return __io_put_kbufs(req, 1, issue_flags); 18935c8711cSJens Axboe } 19035c8711cSJens Axboe 19135c8711cSJens Axboe static inline unsigned int io_put_kbufs(struct io_kiocb *req, int nbufs, 19235c8711cSJens Axboe unsigned issue_flags) 19335c8711cSJens Axboe { 19435c8711cSJens Axboe return __io_put_kbufs(req, nbufs, issue_flags); 19535c8711cSJens Axboe } 1963b77495aSJens Axboe #endif 197