// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_KBUF_H
#define IOU_KBUF_H

#include <uapi/linux/io_uring.h>
#include <linux/io_uring_types.h>

enum {
	/* ring mapped provided buffers */
	IOBL_BUF_RING	= 1,
	/* buffers are consumed incrementally rather than always fully */
	IOBL_INC	= 2,
};

struct io_buffer_list {
	/*
	 * If the IOBL_BUF_RING flag is set, then buf_ring is used. If not,
	 * these are classic provided buffers and ->buf_list is used.
	 */
	union {
		struct list_head buf_list;
		struct io_uring_buf_ring *buf_ring;
	};
	/* count of classic/legacy buffers in buffer list */
	int nbufs;

	__u16 bgid;

	/* below is for ring provided buffers */
	__u16 head;
	__u16 mask;

	__u16 flags;

	/*
	 * Minimum amount that must be left to reuse an incrementally
	 * consumed buffer, stored as that minimum minus one (hence the
	 * name). If less than this is left at consumption time, the
	 * buffer is done and head is incremented to the next buffer.
	 */
	__u32 min_left_sub_one;

	struct io_mapped_region region;
};

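/*
 * Which union member is live follows from ->flags, e.g. (illustrative,
 * assuming a non-empty legacy list):
 *
 *	if (bl->flags & IOBL_BUF_RING)
 *		ring = bl->buf_ring;
 *	else
 *		buf = list_first_entry(&bl->buf_list, struct io_buffer, list);
 */

/*
 * A classic/legacy provided buffer, queued on io_buffer_list->buf_list
 * and identified by buffer ID ->bid within buffer group ->bgid.
 */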
struct io_buffer {
	struct list_head list;
	__u64 addr;
	__u32 len;
	__u16 bid;
	__u16 bgid;
};

enum {
	/* can alloc a bigger vec */
	KBUF_MODE_EXPAND	= 1,
	/* if bigger vec allocated, free old one */
	KBUF_MODE_FREE		= 2,
};

struct buf_sel_arg {
	struct iovec *iovs;
	size_t out_len;
	size_t max_len;
	unsigned short nr_iovs;
	unsigned short mode;
	unsigned short buf_group;
	unsigned short partial_map;
};
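
/*
 * Rough caller-side sketch (illustrative only, names like req->buf_index
 * stand in for wherever the caller keeps the group ID). With
 * KBUF_MODE_EXPAND the on-stack iovec may be swapped for an allocated,
 * bigger one; KBUF_MODE_FREE additionally frees the old vector on
 * expansion, so it is only safe when ->iovs was itself allocated:
 *
 *	struct io_br_sel sel;
 *	struct iovec iov;
 *	struct buf_sel_arg arg = {
 *		.iovs = &iov,
 *		.nr_iovs = 1,
 *		.max_len = INT_MAX,
 *		.mode = KBUF_MODE_EXPAND,
 *		.buf_group = req->buf_index,
 *	};
 *	int nr = io_buffers_select(req, &arg, &sel, issue_flags);
 *
 * On success, arg.iovs holds nr filled iovecs and arg.out_len the total
 * selected length.
 */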

struct io_br_sel io_buffer_select(struct io_kiocb *req, size_t *len,
				  unsigned buf_group, unsigned int issue_flags);
int io_buffers_select(struct io_kiocb *req, struct buf_sel_arg *arg,
		      struct io_br_sel *sel, unsigned int issue_flags);
int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg,
		    struct io_br_sel *sel);
void io_destroy_buffers(struct io_ring_ctx *ctx);

int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
int io_manage_buffers_legacy(struct io_kiocb *req, unsigned int issue_flags);

int io_register_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_unregister_pbuf_ring(struct io_ring_ctx *ctx, void __user *arg);
int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);

bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
void io_kbuf_drop_legacy(struct io_kiocb *req);

unsigned int __io_put_kbufs(struct io_kiocb *req, struct io_buffer_list *bl,
			    int len, int nbufs);
bool io_kbuf_commit(struct io_kiocb *req,
		    struct io_buffer_list *bl, int len, int nr);

struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
					    unsigned int bgid);

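/*
 * Ring-provided buffers are only consumed once the ring head is
 * committed. Recycling just clears REQ_F_BUFFER_RING (and a pending
 * REQ_F_BUFFERS_COMMIT), so the buffer at the uncommitted head gets
 * handed out again on the next selection.
 */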
static inline bool io_kbuf_recycle_ring(struct io_kiocb *req,
					struct io_buffer_list *bl)
{
	if (bl) {
		req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT);
		return true;
	}
	return false;
}

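/*
 * Returns true if the request wants buffer selection (REQ_F_BUFFER_SELECT)
 * but no buffer has been picked for it yet.
 */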
static inline bool io_do_buffer_select(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_BUFFER_SELECT))
		return false;
	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
}

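/*
 * Hand an unused selected buffer back to its group, e.g. when a request
 * has to be retried. REQ_F_BL_NO_RECYCLE marks requests that must keep
 * their buffer. Returns true if the buffer was recycled.
 */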
static inline bool io_kbuf_recycle(struct io_kiocb *req, struct io_buffer_list *bl,
				   unsigned issue_flags)
{
	if (req->flags & REQ_F_BL_NO_RECYCLE)
		return false;
	if (req->flags & REQ_F_BUFFER_RING)
		return io_kbuf_recycle_ring(req, bl);
	if (req->flags & REQ_F_BUFFER_SELECTED)
		return io_kbuf_recycle_legacy(req, issue_flags);
	return false;
}

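/*
 * Release the buffer attached to req, of which len bytes were used.
 * Returns the flags to set in the CQE (IORING_CQE_F_BUFFER plus the
 * encoded buffer ID), or 0 if no buffer was selected.
 */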
static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
				       struct io_buffer_list *bl)
{
	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;
	return __io_put_kbufs(req, bl, len, 1);
}

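/*
 * Like io_put_kbuf(), but for requests that consumed nbufs buffers
 * totalling len bytes.
 */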
static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
					struct io_buffer_list *bl, int nbufs)
{
	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
		return 0;
	return __io_put_kbufs(req, bl, len, nbufs);
}
#endif