// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>

enum {
	/* ring mapped provided buffers */
	IOBL_BUF_RING	= 1,
	/* buffers are consumed incrementally rather than always fully */
	IOBL_INC	= 2,
};

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then ->buf_ring is used. If not, then
	 * these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct io_uring_buf_ring *buf_ring;
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	__u16 flags;

	struct io_mapped_region region;
};

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

enum {
	/* can alloc a bigger vec */
	KBUF_MODE_EXPAND	= 1,
	/* if bigger vec allocated, free old one */
	KBUF_MODE_FREE		= 2,
};

struct buf_sel_arg {
	struct iovec *iovs;
	size_t out_len;
	size_t max_len;
	unsigned short nr_iovs;
	unsigned short mode;
	unsigned buf_group;
};

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned buf_group, unsigned int issue_flags);
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags);
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_manage_buffers_legacy(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
void io_kbuf_drop_legacy(struct io_kiocb *req);

unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs);
bool io_kbuf_commit(struct io_kiocb *req,
		    struct io_buffer_list *bl, int len, int nr);

struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
					    unsigned int bgid);
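
/*
 * Illustrative lifecycle of a selected buffer: a sketch of how the helpers
 * below are typically combined by an opcode handler, not a verbatim call
 * chain from any one caller. buf, len, group, must_retry and res are
 * placeholder variables:
 *
 *	if (io_do_buffer_select(req))
 *		buf = io_buffer_select(req, &len, group, issue_flags);
 *	...issue the operation using buf and len...
 *	if (must_retry)
 *		io_kbuf_recycle(req, issue_flags);	// hand the buffer back
 *	else
 *		cflags = io_put_kbuf(req, res, issue_flags); // consume, get CQE flags
 */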

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io, in which case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT);
		return true;
	}
	return false;
}

/* true if the request uses buffer selection but hasn't picked a buffer yet */
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

/* return an unconsumed buffer to its group, unless recycling is disallowed */
static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}

/* release a single selected buffer, returning the CQE buffer flags */
static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
				       unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;
	return __io_put_kbufs(req, len, 1);
}

/* as io_put_kbuf(), but for requests that consumed multiple buffers */
static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
					int nbufs, unsigned issue_flags)
{
	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;
	return __io_put_kbufs(req, len, nbufs);
}
#endif
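
/*
 * Userspace counterpart, as an illustrative sketch only (assumes liburing
 * and the IORING_REGISTER_PBUF_RING opcode from <uapi/linux/io_uring.h>;
 * ring_mem and ring are placeholder variables). io_register_pbuf_ring()
 * above services a registration such as:
 *
 *	struct io_uring_buf_reg reg = {
 *		.ring_addr	= (unsigned long) ring_mem,	// page-aligned ring memory
 *		.ring_entries	= 8,				// must be a power of 2
 *		.bgid		= 0,				// matched against sqe->buf_group
 *	};
 *	io_uring_register_buf_ring(&ring, &reg, 0);
 */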