// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

enum {
	/* ring mapped provided buffers */
	IOBL_BUF_RING	= 1,
	/* ring mapped provided buffers, but mmap'ed by application */
	IOBL_MMAP	= 2,
	/* buffers are consumed incrementally rather than always fully */
	IOBL_INC	= 4,
};
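
/*
 * Illustrative note, not part of the original header: these bits live in
 * io_buffer_list->flags. A group registered as a buffer ring is assumed to
 * carry IOBL_BUF_RING, optionally combined with IOBL_MMAP (ring allocated by
 * the kernel and mmap'ed by the application) and/or IOBL_INC (incremental
 * consumption), e.g. flags == (IOBL_BUF_RING | IOBL_INC) for an
 * incrementally consumed ring.
 */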

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
		struct rcu_head rcu;
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	__u16 flags;

	atomic_t refs;
};
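
/*
 * Illustrative sketch, not part of the original header: as the comment in
 * the struct says, the union is discriminated by ->buf_nr_pages. Code that
 * holds a bl pointer could in principle pick the right member like this:
 *
 *	if (bl->buf_nr_pages)
 *		use bl->buf_pages / bl->buf_ring (ring mapped buffers);
 *	else
 *		walk bl->buf_list (classic provided buffers);
 */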

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

enum {
	/* can alloc a bigger vec */
	KBUF_MODE_EXPAND	= 1,
	/* if bigger vec allocated, free old one */
	KBUF_MODE_FREE		= 2,
};

struct buf_sel_arg {
	struct iovec *iovs;
	size_t out_len;
	size_t max_len;
	unsigned short nr_iovs;
	unsigned short mode;
};
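
/*
 * Hedged usage sketch, not part of the original header: a rough picture of
 * how a multi-buffer consumer might drive io_buffers_select(). The handler
 * context (the request, fast_iov, the length cap) is assumed; only the
 * struct buf_sel_arg fields and KBUF_MODE_* values come from this header.
 *
 *	struct iovec fast_iov;
 *	struct buf_sel_arg arg = {
 *		.iovs = &fast_iov,
 *		.nr_iovs = 1,
 *		.max_len = INT_MAX,
 *		.mode = KBUF_MODE_EXPAND,
 *	};
 *	int nr = io_buffers_select(req, &arg, issue_flags);
 *
 * A negative return is assumed to be an error; otherwise nr iovecs starting
 * at arg.iovs (possibly a larger, newly allocated array when KBUF_MODE_EXPAND
 * was used) describe the selected buffers.
 */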

void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      unsigned int issue_flags);
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);

void __io_put_kbuf(struct io_kiocb *req, int len, unsigned issue_flags);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl);
struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid);
int io_pbuf_mmap(struct file *file, struct vm_area_struct *vma);

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io, in which case we should increment
	 * bl->head to monopolize the buffer.
	 */
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT);
		return true;
	}
	return false;
}

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}
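
/*
 * Hedged usage sketch, not part of the original header: the common pattern
 * in an opcode handler is to pick a buffer only when none has been selected
 * yet. The surrounding handler context (len, error handling) is assumed.
 *
 *	if (io_do_buffer_select(req)) {
 *		void __user *buf;
 *
 *		buf = io_buffer_select(req, &len, issue_flags);
 *		if (!buf)
 *			return -ENOBUFS;
 *	}
 */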

static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}
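
/*
 * Hedged usage sketch, not part of the original header: when an issue
 * attempt makes no progress and will be retried, a handler typically hands
 * any selected-but-unused buffer back before returning, e.g.:
 *
 *	if (ret == -EAGAIN) {
 *		io_kbuf_recycle(req, issue_flags);
 *		return -EAGAIN;
 *	}
 */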

/* Mapped buffer ring, return io_uring_buf from head */
#define io_ring_head_to_buf(br, head, mask)	&(br)->bufs[(head) & (mask)]
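
/*
 * Worked example, not part of the original header: ring sizes are powers of
 * two and bl->mask is assumed to be nr_entries - 1, so the head index wraps
 * by masking. With nr_entries == 8 (mask == 7) and head == 10:
 *
 *	struct io_uring_buf *buf = io_ring_head_to_buf(bl->buf_ring, 10, 7);
 *
 * points at bl->buf_ring->bufs[2].
 */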

static inline bool io_kbuf_commit(struct io_kiocb *req,
				  struct io_buffer_list *bl, int len, int nr)
{
	if (unlikely(!(req->flags & REQ_F_BUFFERS_COMMIT)))
		return true;

	req->flags &= ~REQ_F_BUFFERS_COMMIT;

	if (unlikely(len < 0))
		return true;

	if (bl->flags & IOBL_INC) {
		struct io_uring_buf *buf;

		buf = io_ring_head_to_buf(bl->buf_ring, bl->head, bl->mask);
		if (WARN_ON_ONCE(len > buf->len))
			len = buf->len;
		buf->len -= len;
		if (buf->len) {
			buf->addr += len;
			return false;
		}
	}

	bl->head += nr;
	return true;
}
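
/*
 * Illustrative walk-through of the IOBL_INC path above, not part of the
 * original header: assume the current ring buffer has buf->len == 4096 and a
 * request consumed len == 1000. io_kbuf_commit() shrinks the buffer to 3096
 * bytes starting 1000 bytes further in and returns false, i.e. the buffer is
 * only partially consumed and bl->head is not advanced. Only once buf->len
 * reaches zero (or the group is not IOBL_INC) does head advance by nr and
 * the function return true.
 */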

static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
{
	struct io_buffer_list *bl = req->buf_list;
	bool ret = true;

	if (bl) {
		ret = io_kbuf_commit(req, bl, len, nr);
		req->buf_index = bl->bgid;
	}
	req->flags &= ~REQ_F_BUFFER_RING;
	return ret;
}

static inline void __io_put_kbuf_list(struct io_kiocb *req, int len,
				      struct list_head *list)
{
	if (req->flags & REQ_F_BUFFER_RING) {
		__io_put_kbuf_ring(req, len, 1);
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}
}

static inline void io_kbuf_drop(struct io_kiocb *req)
{
	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return;

	/* len == 0 is fine here, non-ring will always drop all of it */
	__io_put_kbuf_list(req, 0, &req->ctx->io_buffers_comp);
}

static inline unsigned int __io_put_kbufs(struct io_kiocb *req, int len,
					  int nbufs, unsigned issue_flags)
{
	unsigned int ret;

	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
	if (req->flags & REQ_F_BUFFER_RING) {
		if (!__io_put_kbuf_ring(req, len, nbufs))
			ret |= IORING_CQE_F_BUF_MORE;
	} else {
		__io_put_kbuf(req, len, issue_flags);
	}
	return ret;
}

static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
				       unsigned issue_flags)
{
	return __io_put_kbufs(req, len, 1, issue_flags);
}

static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
					int nbufs, unsigned issue_flags)
{
	return __io_put_kbufs(req, len, nbufs, issue_flags);
}
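
/*
 * Hedged usage sketch, not part of the original header: the value returned
 * by io_put_kbuf()/io_put_kbufs() is OR'ed into the request's completion
 * flags so userspace can tell which buffer was consumed, e.g.:
 *
 *	unsigned int cflags = io_put_kbuf(req, ret, issue_flags);
 *	io_req_set_res(req, ret, cflags);
 *
 * io_req_set_res() is internal io_uring context assumed here only for
 * illustration.
 */
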
#endif