xref: /linux/tools/testing/selftests/ublk/file_backed.c (revision 25489a4f556414445d342951615178368ee45cde)
// SPDX-License-Identifier: GPL-2.0

#include "kublk.h"

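/*
 * Translate a ublk READ/WRITE opcode into the io_uring opcode used
 * against the backing file. When zero-copy (or auto buffer register)
 * is enabled, the *_FIXED variants are used so io_uring resolves the
 * registered buffer by index rather than by user address. Any other
 * opcode here is a caller bug, hence the assert.
 */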
static enum io_uring_op ublk_to_uring_op(const struct ublksrv_io_desc *iod, int zc)
{
	unsigned ublk_op = ublksrv_get_op(iod);

	if (ublk_op == UBLK_IO_OP_READ)
		return zc ? IORING_OP_READ_FIXED : IORING_OP_READ;
	else if (ublk_op == UBLK_IO_OP_WRITE)
		return zc ? IORING_OP_WRITE_FIXED : IORING_OP_WRITE;
	assert(0);
}

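/*
 * UBLK_IO_OP_FLUSH: queue a single fsync SQE with IORING_FSYNC_DATASYNC
 * against the backing file (fixed file index 1, per the fds[1] comments
 * below). Returns the number of SQEs queued.
 */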
static int loop_queue_flush_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
{
	unsigned ublk_op = ublksrv_get_op(iod);
	struct io_uring_sqe *sqe[1];

	ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1);
	if (!sqe[0])
		return -ENOMEM;

	io_uring_prep_fsync(sqe[0], 1 /*fds[1]*/, IORING_FSYNC_DATASYNC);
	io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
	/* bit63 marks us as tgt io */
	sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
	return 1;
}

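/*
 * UBLK_IO_OP_READ/WRITE. Without explicit zero-copy a single rw SQE is
 * enough (auto buffer register only adds a buf_index). With explicit
 * zero-copy, a hardlinked three-SQE chain is queued: register the ublk
 * request buffer, do the fixed-buffer rw, then unregister. Only two
 * CQEs are expected; see the comment at the final return.
 */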
static int loop_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
{
	unsigned ublk_op = ublksrv_get_op(iod);
	unsigned zc = ublk_queue_use_zc(q);
	unsigned auto_zc = ublk_queue_use_auto_zc(q);
	enum io_uring_op op = ublk_to_uring_op(iod, zc | auto_zc);
	struct io_uring_sqe *sqe[3];
	void *addr = (zc | auto_zc) ? NULL : (void *)iod->addr;

	if (!zc || auto_zc) {
		/* single-SQE path: plain rw, or auto buffer registration */
		ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1);
		if (!sqe[0])
			return -ENOMEM;

		io_uring_prep_rw(op, sqe[0], 1 /*fds[1]*/,
				addr,
				iod->nr_sectors << 9,
				iod->start_sector << 9);
		if (auto_zc)
			sqe[0]->buf_index = tag;
		io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
		/* bit63 marks us as tgt io */
		sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
		return 1;
	}

	/* explicit zero-copy: register buffer, fixed-buffer rw, unregister */
	ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 3);
	if (!sqe[0])
		return -ENOMEM;

	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
	sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
	sqe[0]->user_data = build_user_data(tag,
			ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);

	io_uring_prep_rw(op, sqe[1], 1 /*fds[1]*/, 0,
		iod->nr_sectors << 9,
		iod->start_sector << 9);
	sqe[1]->buf_index = tag;
	sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK;
	sqe[1]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);

	io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
	sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);

	/*
	 * Two CQEs are expected, not three: the register SQE has
	 * IOSQE_CQE_SKIP_SUCCESS, so its CQE only shows up on failure
	 * (accounted for in ublk_loop_io_done()).
	 */
	return 2;
}

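/*
 * Dispatch one io descriptor. DISCARD and WRITE_ZEROES are not
 * implemented by this file-backed target, so they fail with -ENOTSUP.
 * Returns the number of SQEs queued, or a negative errno.
 */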
static int loop_queue_tgt_io(struct ublk_queue *q, int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	unsigned ublk_op = ublksrv_get_op(iod);
	int ret;

	switch (ublk_op) {
	case UBLK_IO_OP_FLUSH:
		ret = loop_queue_flush_io(q, iod, tag);
		break;
	case UBLK_IO_OP_WRITE_ZEROES:
	case UBLK_IO_OP_DISCARD:
		ret = -ENOTSUP;
		break;
	case UBLK_IO_OP_READ:
	case UBLK_IO_OP_WRITE:
		ret = loop_queue_tgt_rw_io(q, iod, tag);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	ublk_dbg(UBLK_DBG_IO, "%s: tag %d ublk io %x %llx %u\n", __func__, tag,
			iod->op_flags, iod->start_sector, iod->nr_sectors << 9);
	return ret;
}

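/* ->queue_io(): queue the target SQEs and record how many were issued */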
static int ublk_loop_queue_io(struct ublk_queue *q, int tag)
{
	int queued = loop_queue_tgt_io(q, tag);

	ublk_queued_tgt_io(q, tag, queued);
	return 0;
}

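/*
 * ->tgt_io_done(): one CQE of a possibly multi-SQE target io has
 * completed. Keep the first meaningful result, and complete the ublk io
 * once all expected CQEs have arrived.
 */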
static void ublk_loop_io_done(struct ublk_queue *q, int tag,
		const struct io_uring_cqe *cqe)
{
	unsigned op = user_data_to_op(cqe->user_data);
	struct ublk_io *io = ublk_get_io(q, tag);

	if (cqe->res < 0 || op != ublk_cmd_op_nr(UBLK_U_IO_UNREGISTER_IO_BUF)) {
		if (!io->result)
			io->result = cqe->res;
		if (cqe->res < 0)
			ublk_err("%s: io failed op %x user_data %llx\n",
					__func__, op,
					(unsigned long long)cqe->user_data);
	}

	/*
	 * The buffer register op is IOSQE_CQE_SKIP_SUCCESS: its CQE only
	 * arrives on failure, so account for the extra completion here.
	 */
	if (op == ublk_cmd_op_nr(UBLK_U_IO_REGISTER_IO_BUF))
		io->tgt_ios += 1;

	if (ublk_completed_tgt_io(q, tag))
		ublk_complete_io(q, tag, io->result);
}

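/*
 * Set up the loop target: open the single backing file and export its
 * size, with 512-byte logical / 4K physical blocks. The DMA alignment
 * mask of 511 requires buffers to be 512-byte aligned.
 */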
static int ublk_loop_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
{
	unsigned long long bytes;
	int ret;
	struct ublk_params p = {
		.types = UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DMA_ALIGN,
		.basic = {
			.attrs = UBLK_ATTR_VOLATILE_CACHE,
			.logical_bs_shift	= 9,
			.physical_bs_shift	= 12,
			.io_opt_shift	= 12,
			.io_min_shift	= 9,
			.max_sectors = dev->dev_info.max_io_buf_bytes >> 9,
		},
		.dma = {
			.alignment = 511,
		},
	};

	if (ctx->auto_zc_fallback) {
		ublk_err("%s: auto_zc_fallback is not supported\n", __func__);
		return -EINVAL;
	}

	ret = backing_file_tgt_init(dev);
	if (ret)
		return ret;

	if (dev->tgt.nr_backing_files != 1)
		return -EINVAL;

	bytes = dev->tgt.backing_file_size[0];
	dev->tgt.dev_size = bytes;
	p.basic.dev_sectors = bytes >> 9;
	dev->tgt.params = p;

	return 0;
}

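/* target ops for the file-backed ("loop") target */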
const struct ublk_tgt_ops loop_tgt_ops = {
	.name = "loop",
	.init_tgt = ublk_loop_tgt_init,
	.deinit_tgt = backing_file_tgt_deinit,
	.queue_io = ublk_loop_queue_io,
	.tgt_io_done = ublk_loop_io_done,
};
179