// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>

struct io_buffer_list {
	/*
	 * If ->buf_nr_pages is set, then buf_pages/buf_ring are used. If not,
	 * then these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct {
			struct page **buf_pages;
			struct io_uring_buf_ring *buf_ring;
		};
		struct rcu_head rcu;
	};
	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 buf_nr_pages;
	__u16 nr_entries;
	__u16 head;
	__u16 mask;

	atomic_t refs;

	/* ring mapped provided buffers */
	__u8 is_buf_ring;
	/* ring mapped provided buffers, but mmap'ed by application */
	__u8 is_mmap;
};
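
/*
 * Illustrative sketch (not a verbatim excerpt from kbuf.c) of how a
 * ring provided buffer is picked: the application publishes buffers by
 * advancing the shared ring tail, and the kernel consumes the entry at
 * the current head, masked into the power-of-2 sized ring:
 *
 *	struct io_uring_buf *buf;
 *
 *	buf = &bl->buf_ring->bufs[bl->head & bl->mask];
 *
 * Classic provided buffers are instead tracked one struct io_buffer
 * (below) at a time on ->buf_list.
 */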

struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

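/*
 * io_buffer_select() hands a provided buffer to a request: it picks an
 * entry from the group identified by req->buf_index, returns its user
 * address and clamps *len to the buffer's length, or returns NULL if
 * the group is empty.
 */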
void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
			      unsigned int issue_flags);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);

void io_kbuf_mmap_list_free(struct io_ring_ctx *ctx);

void __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);

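/*
 * io_pbuf_get_bl() looks up the buffer list for @bgid and takes a
 * reference on it; io_put_bl() drops that reference and frees the list
 * once it reaches zero.
 */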
void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl);
struct io_buffer_list *io_pbuf_get_bl(struct io_ring_ctx *ctx,
				      unsigned long bgid);

static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
{
	/*
	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
	 * the flag and hence ensure that bl->head doesn't get incremented.
	 * If the tail has already been incremented, hang on to it.
	 * The exception is partial io, in which case we should increment
	 * bl->head to monopolize the buffer. Restoring ->buf_index to the
	 * buffer group id lets a retry select from the same group again.
	 */
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->flags &= ~REQ_F_BUFFER_RING;
		return true;
	}
	return false;
}

static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}
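
/*
 * Typical pattern in an opcode handler (a sketch, not a verbatim
 * kernel excerpt; "addr"/"len" stand in for per-opcode state):
 *
 *	if (io_do_buffer_select(req)) {
 *		void __user *addr;
 *
 *		addr = io_buffer_select(req, &len, issue_flags);
 *		if (!addr)
 *			return -ENOBUFS;
 *	}
 */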

static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
{
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req);
	return false;
}
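
/*
 * io_kbuf_recycle() above is called for requests that selected a buffer
 * but will not complete this time around (e.g. a retry after -EAGAIN),
 * so the buffer can be handed out again. REQ_F_BL_NO_RECYCLE is set once
 * partial IO has transferred data into the buffer, in which case the
 * request must hang on to it.
 */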

static inline void __io_put_kbuf_ring(struct io_kiocb *req)
{
	if (req->buf_list) {
		req->buf_index = req->buf_list->bgid;
		req->buf_list->head++;
	}
	req->flags &= ~REQ_F_BUFFER_RING;
}
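
/*
 * Bumping bl->head consumes the ring slot for good. The recycle path
 * above deliberately leaves head untouched so the same buffer is handed
 * out again on the next selection.
 */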

static inline void __io_put_kbuf_list(struct io_kiocb *req,
				      struct list_head *list)
{
	if (req->flags & REQ_F_BUFFER_RING) {
		__io_put_kbuf_ring(req);
	} else {
		req->buf_index = req->kbuf->bgid;
		list_add(&req->kbuf->list, list);
		req->flags &= ~REQ_F_BUFFER_SELECTED;
	}
}

static inline unsigned int io_put_kbuf_comp(struct io_kiocb *req)
{
	unsigned int ret;

	lockdep_assert_held(&req->ctx->completion_lock);

	if (!(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)))
		return 0;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
	__io_put_kbuf_list(req, &req->ctx->io_buffers_comp);
	return ret;
}

static inline unsigned int io_put_kbuf(struct io_kiocb *req,
				       unsigned issue_flags)
{
	unsigned int ret;

	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;

	ret = IORING_CQE_F_BUFFER | (req->buf_index << IORING_CQE_BUFFER_SHIFT);
	if (req->flags & REQ_F_BUFFER_RING)
		__io_put_kbuf_ring(req);
	else
		__io_put_kbuf(req, issue_flags);
	return ret;
}
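
/*
 * Both helpers above encode the buffer id in the CQE flags. Userspace
 * recovers it like so (a sketch):
 *
 *	if (cqe->flags & IORING_CQE_F_BUFFER)
 *		bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
 */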
#endif