xref: /linux/tools/testing/selftests/ublk/batch.c (revision d468930a019df71951a80fde20f6348136a2175d)
/* SPDX-License-Identifier: MIT */
/*
 * Description: UBLK_F_BATCH_IO buffer management
 */

#include "kublk.h"

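/*
 * Translate a commit buffer index into the buffer's address inside this
 * thread's commit buffer area; returns NULL if the index is out of range.
 */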
static inline void *ublk_get_commit_buf(struct ublk_thread *t,
					unsigned short buf_idx)
{
	unsigned idx;

	if (buf_idx < t->commit_buf_start ||
			buf_idx >= t->commit_buf_start + t->nr_commit_buf)
		return NULL;
	idx = buf_idx - t->commit_buf_start;
	return t->commit_buf + idx * t->commit_buf_size;
}

/*
 * Allocate one buffer for UBLK_U_IO_PREP_IO_CMDS or UBLK_U_IO_COMMIT_IO_CMDS.
 *
 * Returns the buffer index, or UBLKS_T_COMMIT_BUF_INV_IDX if no commit
 * buffer is available.
 */
static inline unsigned short ublk_alloc_commit_buf(struct ublk_thread *t)
{
	int idx = allocator_get(&t->commit_buf_alloc);

	if (idx >= 0)
		return idx + t->commit_buf_start;
	return UBLKS_T_COMMIT_BUF_INV_IDX;
}

/*
 * Free one commit buffer used by UBLK_U_IO_PREP_IO_CMDS or
 * UBLK_U_IO_COMMIT_IO_CMDS
 */
static inline void ublk_free_commit_buf(struct ublk_thread *t,
					unsigned short i)
{
	unsigned short idx = i - t->commit_buf_start;

	ublk_assert(idx < t->nr_commit_buf);
	ublk_assert(allocator_get_val(&t->commit_buf_alloc, idx) != 0);

	allocator_put(&t->commit_buf_alloc, idx);
}

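/*
 * Element size of one commit record: 8 bytes when the kernel owns or
 * registers the buffer, 16 bytes when a user buffer address must be carried.
 */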
static unsigned char ublk_commit_elem_buf_size(struct ublk_dev *dev)
{
	if (dev->dev_info.flags & (UBLK_F_SUPPORT_ZERO_COPY | UBLK_F_USER_COPY |
				UBLK_F_AUTO_BUF_REG))
		return 8;

	/* one extra 8 bytes for carrying the buffer address */
	return 16;
}

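/*
 * Size of one commit buffer: one element per tag in the queue, rounded up
 * to the page size so the buffer stays page aligned for mlock().
 */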
static unsigned ublk_commit_buf_size(struct ublk_thread *t)
{
	struct ublk_dev *dev = t->dev;
	unsigned elem_size = ublk_commit_elem_buf_size(dev);
	unsigned int total = elem_size * dev->dev_info.queue_depth;
	unsigned int page_sz = getpagesize();

	return round_up(total, page_sz);
}

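/* Unpin and free the commit buffer area, then tear down its index allocator */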
static void free_batch_commit_buf(struct ublk_thread *t)
{
	if (t->commit_buf) {
		unsigned buf_size = ublk_commit_buf_size(t);
		unsigned int total = buf_size * t->nr_commit_buf;

		munlock(t->commit_buf, total);
		free(t->commit_buf);
	}
	allocator_deinit(&t->commit_buf_alloc);
}

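/*
 * Allocate the page-aligned commit buffer area for this thread and try to
 * lock it into RAM; mlock() failure is only logged, not treated as fatal.
 */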
static int alloc_batch_commit_buf(struct ublk_thread *t)
{
	unsigned buf_size = ublk_commit_buf_size(t);
	unsigned int total = buf_size * t->nr_commit_buf;
	unsigned int page_sz = getpagesize();
	void *buf = NULL;
	int ret;

	allocator_init(&t->commit_buf_alloc, t->nr_commit_buf);

	t->commit_buf = NULL;
	ret = posix_memalign(&buf, page_sz, total);
	if (ret || !buf)
		goto fail;

	t->commit_buf = buf;

	/* lock commit buffer pages for fast access */
	if (mlock(t->commit_buf, total))
		ublk_err("%s: can't lock commit buffer %s\n", __func__,
			strerror(errno));

	return 0;

fail:
	free_batch_commit_buf(t);
	return ret;
}

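/*
 * Initialize per-thread batch state: reserve commit buffer indexes right
 * after the already accounted buffers and derive the batch uring_cmd flags
 * from the queue features.
 */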
void ublk_batch_prepare(struct ublk_thread *t)
{
	/*
	 * Only a single device is handled in this thread context.
	 *
	 * All queues share the same feature flags, so use queue 0's flags
	 * for calculating the uring_cmd flags.
	 *
	 * Not elegant, but it works so far.
	 */
	struct ublk_queue *q = &t->dev->q[0];

	t->commit_buf_elem_size = ublk_commit_elem_buf_size(t->dev);
	t->commit_buf_size = ublk_commit_buf_size(t);
	t->commit_buf_start = t->nr_bufs;
	t->nr_commit_buf = 2;
	t->nr_bufs += t->nr_commit_buf;

	t->cmd_flags = 0;
	if (ublk_queue_use_auto_zc(q)) {
		if (ublk_queue_auto_zc_fallback(q))
			t->cmd_flags |= UBLK_BATCH_F_AUTO_BUF_REG_FALLBACK;
	} else if (!ublk_queue_no_buf(q))
		t->cmd_flags |= UBLK_BATCH_F_HAS_BUF_ADDR;

	t->state |= UBLKS_T_BATCH_IO;

	ublk_log("%s: thread %d commit(nr_bufs %u, buf_size %u, start %u)\n",
			__func__, t->idx,
			t->nr_commit_buf, t->commit_buf_size,
			t->commit_buf_start);
}

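/* Allocate the commit buffers backing this thread's batch I/O commands */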
int ublk_batch_alloc_buf(struct ublk_thread *t)
{
	ublk_assert(t->nr_commit_buf < 16);
	return alloc_batch_commit_buf(t);
}

void ublk_batch_free_buf(struct ublk_thread *t)
{
	free_batch_commit_buf(t);
}

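/*
 * Fill one batch uring_cmd SQE: set the command opcode, target the fixed
 * device fd, describe the element layout, and encode the commit buffer index
 * into user_data so the completion handler can locate the buffer.
 */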
static void ublk_init_batch_cmd(struct ublk_thread *t, __u16 q_id,
				struct io_uring_sqe *sqe, unsigned op,
				unsigned short elem_bytes,
				unsigned short nr_elem,
				unsigned short buf_idx)
{
	struct ublk_batch_io *cmd;
	__u64 user_data;

	cmd = (struct ublk_batch_io *)ublk_get_sqe_cmd(sqe);

	ublk_set_sqe_cmd_op(sqe, op);

	sqe->fd		= 0;	/* dev->fds[0] */
	sqe->opcode	= IORING_OP_URING_CMD;
	sqe->flags	= IOSQE_FIXED_FILE;

	cmd->q_id	= q_id;
	cmd->flags	= 0;
	cmd->reserved	= 0;
	cmd->elem_bytes	= elem_bytes;
	cmd->nr_elem	= nr_elem;

	user_data = build_user_data(buf_idx, _IOC_NR(op), 0, q_id, 0);
	io_uring_sqe_set_data64(sqe, user_data);

	t->cmd_inflight += 1;

	ublk_dbg(UBLK_DBG_IO_CMD, "%s: thread %u qid %d cmd_op %x data %lx "
			"nr_elem %u elem_bytes %u buf_size %u buf_idx %d "
			"cmd_inflight %u\n",
			__func__, t->idx, q_id, op, user_data,
			cmd->nr_elem, cmd->elem_bytes,
			nr_elem * elem_bytes, buf_idx, t->cmd_inflight);
}

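/* Set the batch-specific flags on a PREP_IO_CMDS/COMMIT_IO_CMDS command */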
static void ublk_setup_commit_sqe(struct ublk_thread *t,
				  struct io_uring_sqe *sqe,
				  unsigned short buf_idx)
{
	struct ublk_batch_io *cmd;

	cmd = (struct ublk_batch_io *)ublk_get_sqe_cmd(sqe);

	/* buffer mode (user address vs auto registration) was decided in ublk_batch_prepare() */
	cmd->flags |= t->cmd_flags;
}

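/*
 * Queue one UBLK_U_IO_PREP_IO_CMDS command covering every tag in the queue.
 * Each element carries the tag and, depending on the queue features, either
 * an auto-registered buffer index or the user buffer address.
 */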
int ublk_batch_queue_prep_io_cmds(struct ublk_thread *t, struct ublk_queue *q)
{
	unsigned short nr_elem = q->q_depth;
	unsigned short buf_idx = ublk_alloc_commit_buf(t);
	struct io_uring_sqe *sqe;
	void *buf;
	int i;

	ublk_assert(buf_idx != UBLKS_T_COMMIT_BUF_INV_IDX);

	ublk_io_alloc_sqes(t, &sqe, 1);

	ublk_assert(nr_elem == q->q_depth);
	buf = ublk_get_commit_buf(t, buf_idx);
	for (i = 0; i < nr_elem; i++) {
		struct ublk_batch_elem *elem = (struct ublk_batch_elem *)(
				buf + i * t->commit_buf_elem_size);
		struct ublk_io *io = &q->ios[i];

		elem->tag = i;
		elem->result = 0;

		if (ublk_queue_use_auto_zc(q))
			elem->buf_index = ublk_batch_io_buf_idx(t, q, i);
		else if (!ublk_queue_no_buf(q))
			elem->buf_addr = (__u64)io->buf_addr;
	}

	sqe->addr = (__u64)buf;
	sqe->len = t->commit_buf_elem_size * nr_elem;

	ublk_init_batch_cmd(t, q->q_id, sqe, UBLK_U_IO_PREP_IO_CMDS,
			t->commit_buf_elem_size, nr_elem, buf_idx);
	ublk_setup_commit_sqe(t, sqe, buf_idx);
	return 0;
}

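/*
 * Completion of a PREP_IO_CMDS/COMMIT_IO_CMDS command: check the result and
 * return the associated commit buffer to the allocator.
 */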
static void ublk_batch_compl_commit_cmd(struct ublk_thread *t,
					const struct io_uring_cqe *cqe,
					unsigned op)
{
	unsigned short buf_idx = user_data_to_tag(cqe->user_data);

	if (op == _IOC_NR(UBLK_U_IO_PREP_IO_CMDS))
		ublk_assert(cqe->res == 0);
	else if (op == _IOC_NR(UBLK_U_IO_COMMIT_IO_CMDS))
		;	/* disabled: assert(cqe->res == t->commit_buf_size) */
	else
		ublk_assert(0);

	ublk_free_commit_buf(t, buf_idx);
}

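/* Dispatch batch uring_cmd completions based on the opcode encoded in user_data */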
void ublk_batch_compl_cmd(struct ublk_thread *t,
			  const struct io_uring_cqe *cqe)
{
	unsigned op = user_data_to_op(cqe->user_data);

	if (op == _IOC_NR(UBLK_U_IO_PREP_IO_CMDS) ||
			op == _IOC_NR(UBLK_U_IO_COMMIT_IO_CMDS)) {
		t->cmd_inflight--;
		ublk_batch_compl_commit_cmd(t, cqe, op);
		return;
	}
}
267