// SPDX-License-Identifier: GPL-2.0

#include "kublk.h"

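/*
 * Map a ublk READ/WRITE op to the matching io_uring op, picking the
 * fixed-buffer variants when zero-copy (or auto buffer register) is used.
 */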
static enum io_uring_op ublk_to_uring_op(const struct ublksrv_io_desc *iod, int zc)
{
	unsigned ublk_op = ublksrv_get_op(iod);

	if (ublk_op == UBLK_IO_OP_READ)
		return zc ? IORING_OP_READ_FIXED : IORING_OP_READ;
	else if (ublk_op == UBLK_IO_OP_WRITE)
		return zc ? IORING_OP_WRITE_FIXED : IORING_OP_WRITE;
	assert(0);
}

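/*
 * Handle UBLK_IO_OP_FLUSH by issuing a datasync on the backing file
 * (registered fd index 1); user_data marks the CQE as target I/O.
 */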
static int loop_queue_flush_io(struct ublk_thread *t, struct ublk_queue *q,
		const struct ublksrv_io_desc *iod, int tag)
{
	unsigned ublk_op = ublksrv_get_op(iod);
	struct io_uring_sqe *sqe[1];

	ublk_io_alloc_sqes(t, sqe, 1);
	io_uring_prep_fsync(sqe[0], ublk_get_registered_fd(q, 1) /*fds[1]*/, IORING_FSYNC_DATASYNC);
	io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
	/* bit63 marks us as tgt io */
	sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
	return 1;
}

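/*
 * Queue a READ/WRITE against the backing file.  Without zero-copy (or with
 * auto buffer register) a single R/W SQE is enough; otherwise a hard-linked
 * chain of buffer-register, R/W and buffer-unregister SQEs is issued.
 */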
static int loop_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
		const struct ublksrv_io_desc *iod, int tag)
{
	unsigned ublk_op = ublksrv_get_op(iod);
	unsigned zc = ublk_queue_use_zc(q);
	unsigned auto_zc = ublk_queue_use_auto_zc(q);
	enum io_uring_op op = ublk_to_uring_op(iod, zc | auto_zc);
	struct io_uring_sqe *sqe[3];
	void *addr = (zc | auto_zc) ? NULL : (void *)iod->addr;

	if (!zc || auto_zc) {
		ublk_io_alloc_sqes(t, sqe, 1);
		if (!sqe[0])
			return -ENOMEM;

		io_uring_prep_rw(op, sqe[0], ublk_get_registered_fd(q, 1) /*fds[1]*/,
				addr,
				iod->nr_sectors << 9,
				iod->start_sector << 9);
		if (auto_zc)
			sqe[0]->buf_index = tag;
		io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
		/* bit63 marks us as tgt io */
		sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
		return 1;
	}

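	/*
	 * Zero-copy path: hard-link three SQEs - register the ublk request
	 * buffer, do the fixed-buffer R/W, then unregister the buffer.  The
	 * register SQE is IOSQE_CQE_SKIP_SUCCESS, so only two CQEs are
	 * expected on success, hence the return value of 2.
	 */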
	ublk_io_alloc_sqes(t, sqe, 3);

	io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
	sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
	sqe[0]->user_data = build_user_data(tag,
			ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);

	io_uring_prep_rw(op, sqe[1], ublk_get_registered_fd(q, 1) /*fds[1]*/, 0,
		iod->nr_sectors << 9,
		iod->start_sector << 9);
	sqe[1]->buf_index = tag;
	sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK;
	sqe[1]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);

	io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
	sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);

	return 2;
}

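/*
 * Dispatch one ublk I/O descriptor to the matching handler; DISCARD and
 * WRITE_ZEROES are not supported by this target.
 */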
static int loop_queue_tgt_io(struct ublk_thread *t, struct ublk_queue *q, int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	unsigned ublk_op = ublksrv_get_op(iod);
	int ret;

	switch (ublk_op) {
	case UBLK_IO_OP_FLUSH:
		ret = loop_queue_flush_io(t, q, iod, tag);
		break;
	case UBLK_IO_OP_WRITE_ZEROES:
	case UBLK_IO_OP_DISCARD:
		ret = -ENOTSUP;
		break;
	case UBLK_IO_OP_READ:
	case UBLK_IO_OP_WRITE:
		ret = loop_queue_tgt_rw_io(t, q, iod, tag);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	ublk_dbg(UBLK_DBG_IO, "%s: tag %d ublk io %x %llx %u\n", __func__, tag,
			iod->op_flags, iod->start_sector, iod->nr_sectors << 9);
	return ret;
}

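/* ->queue_io() entry: queue the target I/O and record how many SQEs went out */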
static int ublk_loop_queue_io(struct ublk_thread *t, struct ublk_queue *q,
		int tag)
{
	int queued = loop_queue_tgt_io(t, q, tag);

	ublk_queued_tgt_io(t, q, tag, queued);
	return 0;
}

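/*
 * ->tgt_io_done(): record the first failure (or the R/W result), then
 * complete the ublk request once all expected target CQEs have arrived.
 */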
static void ublk_loop_io_done(struct ublk_thread *t, struct ublk_queue *q,
		const struct io_uring_cqe *cqe)
{
	unsigned tag = user_data_to_tag(cqe->user_data);
	unsigned op = user_data_to_op(cqe->user_data);
	struct ublk_io *io = ublk_get_io(q, tag);

	if (cqe->res < 0 || op != ublk_cmd_op_nr(UBLK_U_IO_UNREGISTER_IO_BUF)) {
		if (!io->result)
			io->result = cqe->res;
		if (cqe->res < 0)
			ublk_err("%s: io failed op %x user_data %lx\n",
					__func__, op, cqe->user_data);
	}

	/* buffer register op is IOSQE_CQE_SKIP_SUCCESS */
	if (op == ublk_cmd_op_nr(UBLK_U_IO_REGISTER_IO_BUF))
		io->tgt_ios += 1;

	if (ublk_completed_tgt_io(t, q, tag))
		ublk_complete_io(t, q, tag, io->result);
}

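/*
 * ->init_tgt(): open the single backing file, size the device from it and
 * fill in basic + DMA-alignment parameters (512B logical / 4K physical
 * blocks, volatile write cache advertised via UBLK_ATTR_VOLATILE_CACHE).
 */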
static int ublk_loop_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
{
	unsigned long long bytes;
	int ret;
	struct ublk_params p = {
		.types = UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DMA_ALIGN,
		.basic = {
			.attrs = UBLK_ATTR_VOLATILE_CACHE,
			.logical_bs_shift = 9,
			.physical_bs_shift = 12,
			.io_opt_shift = 12,
			.io_min_shift = 9,
			.max_sectors = dev->dev_info.max_io_buf_bytes >> 9,
		},
		.dma = {
			.alignment = 511,
		},
	};

	if (ctx->auto_zc_fallback) {
		ublk_err("%s: auto_zc_fallback is not supported\n", __func__);
		return -EINVAL;
	}

	ret = backing_file_tgt_init(dev);
	if (ret)
		return ret;

	if (dev->tgt.nr_backing_files != 1)
		return -EINVAL;

	bytes = dev->tgt.backing_file_size[0];
	dev->tgt.dev_size = bytes;
	p.basic.dev_sectors = bytes >> 9;
	dev->tgt.params = p;

	return 0;
}

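/* Target ops for the file-backed "loop" target */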
const struct ublk_tgt_ops loop_tgt_ops = {
	.name = "loop",
	.init_tgt = ublk_loop_tgt_init,
	.deinit_tgt = backing_file_tgt_deinit,
	.queue_io = ublk_loop_queue_io,
	.tgt_io_done = ublk_loop_io_done,
};