xref: /linux/tools/testing/selftests/ublk/kublk.h (revision c284d3e423382be3591d5b1e402e330e6c3f726c)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef KUBLK_INTERNAL_H
3 #define KUBLK_INTERNAL_H
4 
5 #include <unistd.h>
6 #include <stdlib.h>
7 #include <assert.h>
8 #include <stdio.h>
9 #include <stdarg.h>
10 #include <string.h>
11 #include <pthread.h>
12 #include <getopt.h>
13 #include <limits.h>
14 #include <poll.h>
15 #include <fcntl.h>
16 #include <sys/syscall.h>
17 #include <sys/mman.h>
18 #include <sys/ioctl.h>
19 #include <sys/inotify.h>
20 #include <sys/wait.h>
21 #include <sys/eventfd.h>
22 #include <sys/uio.h>
23 #include <liburing.h>
24 #include <linux/ublk_cmd.h>
25 #include "ublk_dep.h"
26 
/* suppress "unused" warnings on helpers some configs don't reference */
#define __maybe_unused __attribute__((unused))
/* upper bound on backing files (and their fd slots) per device */
#define MAX_BACK_FILES   4
#ifndef min
/* NOTE: evaluates each argument twice; don't pass expressions with side effects */
#define min(a, b) ((a) < (b) ? (a) : (b))
#endif
32 
33 /****************** part 1: libublk ********************/
34 
/* control node, and char/block device name prefixes (device id is appended) */
#define CTRL_DEV		"/dev/ublk-control"
#define UBLKC_DEV		"/dev/ublkc"
#define UBLKB_DEV		"/dev/ublkb"
/* sq/cq depth of the ring used for control commands */
#define UBLK_CTRL_RING_DEPTH            32
/* sentinel dev_id written to the eventfd when setup fails -- see dev_ctx._evtfd */
#define ERROR_EVTFD_DEVID 	-2

/* queue idle timeout */
#define UBLKSRV_IO_IDLE_SECS		20

/* per-io buffer size, and hard caps on queue count/depth */
#define UBLK_IO_MAX_BYTES               (1 << 20)
#define UBLK_MAX_QUEUES                 4
#define UBLK_QUEUE_DEPTH                128

/* ublk_dbg_mask bits selecting which debug categories get printed */
#define UBLK_DBG_DEV            (1U << 0)
#define UBLK_DBG_QUEUE          (1U << 1)
#define UBLK_DBG_IO_CMD         (1U << 2)
#define UBLK_DBG_IO             (1U << 3)
#define UBLK_DBG_CTRL_CMD       (1U << 4)
#define UBLK_LOG                (1U << 5)
54 
55 struct ublk_dev;
56 struct ublk_queue;
57 
/*
 * Parsed command-line / invocation context, handed (const) to the
 * target's init_tgt() callback during device setup.
 */
struct dev_ctx {
	char tgt_type[16];	/* target name, e.g. "null"/"loop"/"stripe" */
	unsigned long flags;	/* ublk device feature flags */
	unsigned nr_hw_queues;	/* requested queue count */
	unsigned queue_depth;	/* requested per-queue depth */
	int dev_id;		/* ublk device number; presumably <0 requests auto-allocation -- TODO confirm */
	int nr_files;		/* number of valid entries in files[] */
	char *files[MAX_BACK_FILES];	/* backing file paths from the command line */
	unsigned int	logging:1;	/* verbose logging requested */
	unsigned int	all:1;		/* operate on all devices, not just dev_id */
	unsigned int	fg:1;		/* run in foreground */

	/* stripe */
	unsigned int    chunk_size;	/* stripe chunk size (stripe target only) */

	/* eventfd used to report setup success/failure (see ERROR_EVTFD_DEVID) */
	int _evtfd;
};
75 
/*
 * Argument bundle for issuing one control command to /dev/ublk-control.
 */
struct ublk_ctrl_cmd_data {
	__u32 cmd_op;			/* UBLK_U_CMD_* control opcode */
/* flags bits declaring which payload fields below are valid */
#define CTRL_CMD_HAS_DATA	1	/* data[] carries inline command data */
#define CTRL_CMD_HAS_BUF	2	/* addr/len describe a user buffer */
	__u32 flags;

	__u64 data[2];			/* inline data (valid with CTRL_CMD_HAS_DATA) */
	__u64 addr;			/* buffer address (valid with CTRL_CMD_HAS_BUF) */
	__u32 len;			/* buffer length in bytes */
};
86 
/* Per-tag io slot state tracked by the server. */
struct ublk_io {
	char *buf_addr;		/* data buffer for this tag */

/* flags bits */
#define UBLKSRV_NEED_FETCH_RQ		(1UL << 0)	/* a FETCH_REQ command must be (re)issued */
#define UBLKSRV_NEED_COMMIT_RQ_COMP	(1UL << 1)	/* result must be committed back; set by ublk_mark_io_done() */
#define UBLKSRV_IO_FREE			(1UL << 2)	/* slot is free / not being processed */
	unsigned short flags;
	unsigned short refs;		/* used by target code only */

	int result;		/* completion result committed back to the driver */

	unsigned short tgt_ios;	/* outstanding target sub-I/Os for this tag */
	void *private_data;	/* per-io private pointer for the target */
};
101 
/* Hooks implemented by each target type (null/loop/stripe, see externs below). */
struct ublk_tgt_ops {
	const char *name;
	/* size the device and fill tgt params from ctx; returns 0 on success */
	int (*init_tgt)(const struct dev_ctx *ctx, struct ublk_dev *);
	/* release target resources acquired in init_tgt */
	void (*deinit_tgt)(struct ublk_dev *);

	/* start handling the request of `tag`; returns queued sub-io count or <0 */
	int (*queue_io)(struct ublk_queue *, int tag);
	/* completion of one target sqe previously submitted by queue_io */
	void (*tgt_io_done)(struct ublk_queue *,
			int tag, const struct io_uring_cqe *);
};
111 
/* Target-side device state, filled in by ops->init_tgt(). */
struct ublk_tgt {
	unsigned long dev_size;		/* exported device size in bytes */
	unsigned int  sq_depth;		/* sq depth for the per-queue rings */
	unsigned int  cq_depth;		/* cq depth for the per-queue rings */
	const struct ublk_tgt_ops *ops;	/* active target implementation */
	struct ublk_params params;	/* device parameters programmed into the driver */

	int nr_backing_files;		/* valid entries in the two arrays below */
	unsigned long backing_file_size[MAX_BACK_FILES];	/* per-file size in bytes */
	char backing_file[MAX_BACK_FILES][PATH_MAX];		/* per-file path */
};
123 
/* Per-queue state; each queue owns a thread and a private io_uring. */
struct ublk_queue {
	int q_id;			/* index into dev->q[] */
	int q_depth;			/* at most UBLK_QUEUE_DEPTH */
	unsigned int cmd_inflight;	/* inflight ublk io commands */
	unsigned int io_inflight;	/* inflight target sub-I/Os (see ublk_queued_tgt_io) */
	struct ublk_dev *dev;		/* back pointer to the owning device */
	const struct ublk_tgt_ops *tgt_ops;
	struct ublksrv_io_desc *io_cmd_buf;	/* io descriptor array, indexed by tag (see ublk_get_iod) */
	struct io_uring ring;		/* this queue's private ring */
	struct ublk_io ios[UBLK_QUEUE_DEPTH];	/* per-tag io slots */
/* state bits */
#define UBLKSRV_QUEUE_STOPPING	(1U << 0)
#define UBLKSRV_QUEUE_IDLE	(1U << 1)
#define UBLKSRV_NO_BUF		(1U << 2)	/* no per-io data buffer */
#define UBLKSRV_ZC		(1U << 3)	/* zero-copy mode (see ublk_queue_use_zc) */
	unsigned state;
	pid_t tid;			/* kernel tid of the queue thread */
	pthread_t thread;		/* queue service thread */
};
142 
/* One ublk device: target state, control info and all of its queues. */
struct ublk_dev {
	struct ublk_tgt tgt;
	struct ublksrv_ctrl_dev_info  dev_info;	/* info exchanged with the control device */
	struct ublk_queue q[UBLK_MAX_QUEUES];

	int fds[MAX_BACK_FILES + 1];	/* fds[0] points to /dev/ublkcN */
	int nr_fds;			/* number of valid entries in fds[] */
	int ctrl_fd;			/* /dev/ublk-control fd */
	struct io_uring ring;		/* ring used for control commands */

	void *private_data;		/* device-wide private pointer for the target */
};
155 
#ifndef offsetof
/* byte offset of MEMBER within TYPE (fallback when <stddef.h> doesn't provide it) */
#define offsetof(TYPE, MEMBER)  ((size_t)&((TYPE *)0)->MEMBER)
#endif

#ifndef container_of
/* map a pointer to MEMBER back to its enclosing TYPE object */
#define container_of(ptr, type, member) ({                              \
	unsigned long __mptr = (unsigned long)(ptr);                    \
	((type *)(__mptr - offsetof(type, member))); })
#endif

/* round val up to a multiple of rnd; rnd must be a power of two */
#define round_up(val, rnd) \
	(((val) + ((rnd) - 1)) & ~((rnd) - 1))
168 
169 
170 extern unsigned int ublk_dbg_mask;
171 extern int ublk_queue_io_cmd(struct ublk_queue *q, struct ublk_io *io, unsigned tag);
172 
/* Bit 63 of a cqe user_data marks it as target I/O (vs an ublk command). */
static inline int is_target_io(__u64 user_data)
{
	return !!(user_data & (1ULL << 63));
}
177 
/*
 * Encode a 64-bit cqe user_data:
 *   bits  0..15  tag
 *   bits 16..23  op
 *   bits 24..39  target private data
 *   bit  63      target-io flag
 *
 * The shifts must be done in 64-bit arithmetic: the original
 * "tgt_data << 24" was evaluated in 32 bits, silently dropping the
 * top 8 bits of a 16-bit tgt_data before they could reach bits
 * 32..39 (which user_data_to_tgt_data() expects to read back).
 */
static inline __u64 build_user_data(unsigned tag, unsigned op,
		unsigned tgt_data, unsigned is_target_io)
{
	/* each field must fit its slot */
	assert(!(tag >> 16) && !(op >> 8) && !(tgt_data >> 16));

	return tag | ((__u64)op << 16) | ((__u64)tgt_data << 24) |
		(__u64)is_target_io << 63;
}
185 
/* Extract the tag from bits 0..15 of a user_data built by build_user_data(). */
static inline unsigned int user_data_to_tag(__u64 user_data)
{
	return (unsigned int)(user_data & 0xffff);
}
190 
/* Extract the op from bits 16..23 of a user_data built by build_user_data(). */
static inline unsigned int user_data_to_op(__u64 user_data)
{
	const __u64 shifted = user_data >> 16;

	return (unsigned int)(shifted & 0xff);
}
195 
/* Extract the target data from bits 24..39 of a user_data. */
static inline unsigned int user_data_to_tgt_data(__u64 user_data)
{
	const __u64 shifted = user_data >> 24;

	return (unsigned int)(shifted & 0xffff);
}
200 
/* Command number (low ioctl NR byte) of a ublk uring_cmd opcode. */
static inline unsigned short ublk_cmd_op_nr(unsigned int op)
{
	return (unsigned short)_IOC_NR(op);
}
205 
/* Unconditional error logging to stderr, printf-style. */
static inline void ublk_err(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	/* every va_start must be paired with va_end (C11 7.16.1) */
	va_end(ap);
}
213 
214 static inline void ublk_log(const char *fmt, ...)
215 {
216 	if (ublk_dbg_mask & UBLK_LOG) {
217 		va_list ap;
218 
219 		va_start(ap, fmt);
220 		vfprintf(stdout, fmt, ap);
221 	}
222 }
223 
224 static inline void ublk_dbg(int level, const char *fmt, ...)
225 {
226 	if (level & ublk_dbg_mask) {
227 		va_list ap;
228 
229 		va_start(ap, fmt);
230 		vfprintf(stdout, fmt, ap);
231 	}
232 }
233 
234 static inline int ublk_queue_alloc_sqes(struct ublk_queue *q,
235 		struct io_uring_sqe *sqes[], int nr_sqes)
236 {
237 	unsigned left = io_uring_sq_space_left(&q->ring);
238 	int i;
239 
240 	if (left < nr_sqes)
241 		io_uring_submit(&q->ring);
242 
243 	for (i = 0; i < nr_sqes; i++) {
244 		sqes[i] = io_uring_get_sqe(&q->ring);
245 		if (!sqes[i])
246 			return i;
247 	}
248 
249 	return nr_sqes;
250 }
251 
/*
 * Prepare a uring_cmd sqe that registers the io buffer of (q_id, tag)
 * into the ring's fixed buffer table at slot `index`.  dev_fd is an
 * index into the ring's registered file table (IOSQE_FIXED_FILE),
 * i.e. the /dev/ublkcN slot.
 */
static inline void io_uring_prep_buf_register(struct io_uring_sqe *sqe,
		int dev_fd, int tag, int q_id, __u64 index)
{
	struct ublksrv_io_cmd *cmd = (struct ublksrv_io_cmd *)sqe->cmd;

	/* prep_read only initializes the sqe; opcode/cmd_op are overridden below */
	io_uring_prep_read(sqe, dev_fd, 0, 0, 0);
	sqe->opcode		= IORING_OP_URING_CMD;
	sqe->flags		|= IOSQE_FIXED_FILE;
	sqe->cmd_op		= UBLK_U_IO_REGISTER_IO_BUF;

	/* ublksrv_io_cmd payload: addr carries the buffer-table index here */
	cmd->tag		= tag;
	cmd->addr		= index;
	cmd->q_id		= q_id;
}
266 
/*
 * Prepare a uring_cmd sqe that unregisters the buffer of (q_id, tag)
 * from the ring's fixed buffer table slot `index`; mirror image of
 * io_uring_prep_buf_register().
 */
static inline void io_uring_prep_buf_unregister(struct io_uring_sqe *sqe,
		int dev_fd, int tag, int q_id, __u64 index)
{
	struct ublksrv_io_cmd *cmd = (struct ublksrv_io_cmd *)sqe->cmd;

	/* prep_read only initializes the sqe; opcode/cmd_op are overridden below */
	io_uring_prep_read(sqe, dev_fd, 0, 0, 0);
	sqe->opcode		= IORING_OP_URING_CMD;
	sqe->flags		|= IOSQE_FIXED_FILE;
	sqe->cmd_op		= UBLK_U_IO_UNREGISTER_IO_BUF;

	/* ublksrv_io_cmd payload: addr carries the buffer-table index here */
	cmd->tag		= tag;
	cmd->addr		= index;
	cmd->q_id		= q_id;
}
281 
282 static inline void *ublk_get_sqe_cmd(const struct io_uring_sqe *sqe)
283 {
284 	return (void *)&sqe->cmd;
285 }
286 
287 static inline void ublk_set_io_res(struct ublk_queue *q, int tag, int res)
288 {
289 	q->ios[tag].result = res;
290 }
291 
292 static inline int ublk_get_io_res(const struct ublk_queue *q, unsigned tag)
293 {
294 	return q->ios[tag].result;
295 }
296 
297 static inline void ublk_mark_io_done(struct ublk_io *io, int res)
298 {
299 	io->flags |= (UBLKSRV_NEED_COMMIT_RQ_COMP | UBLKSRV_IO_FREE);
300 	io->result = res;
301 }
302 
303 static inline const struct ublksrv_io_desc *ublk_get_iod(const struct ublk_queue *q, int tag)
304 {
305 	return &q->io_cmd_buf[tag];
306 }
307 
/*
 * Store the 32-bit uring_cmd opcode into the sqe.  cmd_op shares
 * storage with the low 32 bits of sqe->off in the io_uring_sqe
 * layout, hence the write through &sqe->off; the adjacent word is
 * zeroed so no stale bits are sent to the kernel.
 */
static inline void ublk_set_sqe_cmd_op(struct io_uring_sqe *sqe, __u32 cmd_op)
{
	__u32 *addr = (__u32 *)&sqe->off;

	addr[0] = cmd_op;
	addr[1] = 0;
}
315 
316 static inline struct ublk_io *ublk_get_io(struct ublk_queue *q, unsigned tag)
317 {
318 	return &q->ios[tag];
319 }
320 
321 static inline int ublk_complete_io(struct ublk_queue *q, unsigned tag, int res)
322 {
323 	struct ublk_io *io = &q->ios[tag];
324 
325 	ublk_mark_io_done(io, res);
326 
327 	return ublk_queue_io_cmd(q, io, tag);
328 }
329 
330 static inline void ublk_queued_tgt_io(struct ublk_queue *q, unsigned tag, int queued)
331 {
332 	if (queued < 0)
333 		ublk_complete_io(q, tag, queued);
334 	else {
335 		struct ublk_io *io = ublk_get_io(q, tag);
336 
337 		q->io_inflight += queued;
338 		io->tgt_ios = queued;
339 		io->result = 0;
340 	}
341 }
342 
343 static inline int ublk_completed_tgt_io(struct ublk_queue *q, unsigned tag)
344 {
345 	struct ublk_io *io = ublk_get_io(q, tag);
346 
347 	q->io_inflight--;
348 
349 	return --io->tgt_ios == 0;
350 }
351 
352 static inline int ublk_queue_use_zc(const struct ublk_queue *q)
353 {
354 	return q->state & UBLKSRV_ZC;
355 }
356 
357 extern const struct ublk_tgt_ops null_tgt_ops;
358 extern const struct ublk_tgt_ops loop_tgt_ops;
359 extern const struct ublk_tgt_ops stripe_tgt_ops;
360 
361 void backing_file_tgt_deinit(struct ublk_dev *dev);
362 int backing_file_tgt_init(struct ublk_dev *dev);
363 
/* Floor of log2(x); defined as 0 for x == 0. */
static inline unsigned int ilog2(unsigned int x)
{
	unsigned int log = 0;

	while (x > 1) {
		x >>= 1;
		log++;
	}
	return log;
}
370 #endif
371