// SPDX-License-Identifier: GPL-2.0

#include "kublk.h"

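/*
 * Translate a ublk READ/WRITE opcode into the io_uring opcode used
 * against the backing file; zero copy uses the *_FIXED variants since
 * the data buffer is registered with the ring.
 */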
static enum io_uring_op ublk_to_uring_op(const struct ublksrv_io_desc *iod, int zc)
{
	unsigned ublk_op = ublksrv_get_op(iod);

	if (ublk_op == UBLK_IO_OP_READ)
		return zc ? IORING_OP_READ_FIXED : IORING_OP_READ;
	else if (ublk_op == UBLK_IO_OP_WRITE)
		return zc ? IORING_OP_WRITE_FIXED : IORING_OP_WRITE;
	assert(0);
}

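/*
 * UBLK_IO_OP_FLUSH maps to a datasync fsync on the backing file, which
 * sits in fixed-file slot 1 of the queue's ring.
 */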
static int loop_queue_flush_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
{
	unsigned ublk_op = ublksrv_get_op(iod);
	struct io_uring_sqe *sqe[1];

	ublk_queue_alloc_sqes(q, sqe, 1);
	io_uring_prep_fsync(sqe[0], 1 /*fds[1]*/, IORING_FSYNC_DATASYNC);
	io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
	/* bit63 marks us as tgt io */
	sqe[0]->user_data = build_user_data(tag, ublk_op, 0, 1);
	return 1;
}

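/*
 * Queue a READ or WRITE against the backing file.  Without zero copy a
 * single SQE does the transfer through the per-tag buffer at iod->addr;
 * with zero copy a three-SQE chain registers the request buffer, does
 * the I/O, and unregisters it.
 */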
static int loop_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
{
	unsigned ublk_op = ublksrv_get_op(iod);
	int zc = ublk_queue_use_zc(q);
	enum io_uring_op op = ublk_to_uring_op(iod, zc);
	struct io_uring_sqe *sqe[3];

	if (!zc) {
		ublk_queue_alloc_sqes(q, sqe, 1);
		if (!sqe[0])
			return -ENOMEM;

		io_uring_prep_rw(op, sqe[0], 1 /*fds[1]*/,
				(void *)iod->addr,
				iod->nr_sectors << 9,
				iod->start_sector << 9);
		io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
		/* bit63 marks us as tgt io */
		sqe[0]->user_data = build_user_data(tag, ublk_op, 0, 1);
		return 1;
	}

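	/*
	 * Zero copy: hard-link register -> I/O -> unregister so the three
	 * SQEs execute in order without a userspace round trip in between.
	 */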
	ublk_queue_alloc_sqes(q, sqe, 3);

	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
	sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
	sqe[0]->user_data = build_user_data(tag,
			ublk_cmd_op_nr(sqe[0]->cmd_op), 0, 1);

	io_uring_prep_rw(op, sqe[1], 1 /*fds[1]*/, 0,
			iod->nr_sectors << 9,
			iod->start_sector << 9);
	sqe[1]->buf_index = tag;
	sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK;
	sqe[1]->user_data = build_user_data(tag, ublk_op, 0, 1);

	io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, tag);
	sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, 1);

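	/*
	 * Only two CQEs are expected on success: the register SQE is
	 * flagged IOSQE_CQE_SKIP_SUCCESS, so it posts a completion only
	 * on failure (handled in ublk_loop_io_done()).
	 */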
	return 2;
}

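/* Dispatch one ublk request to the handler matching its opcode. */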
static int loop_queue_tgt_io(struct ublk_queue *q, int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	unsigned ublk_op = ublksrv_get_op(iod);
	int ret;

	switch (ublk_op) {
	case UBLK_IO_OP_FLUSH:
		ret = loop_queue_flush_io(q, iod, tag);
		break;
	case UBLK_IO_OP_WRITE_ZEROES:
	case UBLK_IO_OP_DISCARD:
		ret = -ENOTSUP;
		break;
	case UBLK_IO_OP_READ:
	case UBLK_IO_OP_WRITE:
		ret = loop_queue_tgt_rw_io(q, iod, tag);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	ublk_dbg(UBLK_DBG_IO, "%s: tag %d ublk io %x %llx %u\n", __func__, tag,
			iod->op_flags, iod->start_sector, iod->nr_sectors << 9);
	return ret;
}

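/*
 * ->queue_io(): queue the target SQEs for one request and record how
 * many target CQEs to expect before the ublk request can complete.
 */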
static int ublk_loop_queue_io(struct ublk_queue *q, int tag)
{
	int queued = loop_queue_tgt_io(q, tag);

	ublk_queued_tgt_io(q, tag, queued);
	return 0;
}

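/*
 * ->tgt_io_done(): runs once per target CQE.  The first meaningful
 * result is latched into io->result; a successful unregister CQE is
 * ignored so it cannot overwrite the I/O's byte count.  The ublk
 * request completes once every expected CQE has arrived.
 */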
static void ublk_loop_io_done(struct ublk_queue *q, int tag,
			      const struct io_uring_cqe *cqe)
{
	unsigned op = user_data_to_op(cqe->user_data);
	struct ublk_io *io = ublk_get_io(q, tag);

	if (cqe->res < 0 || op != ublk_cmd_op_nr(UBLK_U_IO_UNREGISTER_IO_BUF)) {
		if (!io->result)
			io->result = cqe->res;
		if (cqe->res < 0)
			ublk_err("%s: io failed op %x user_data %lx\n",
					__func__, op, cqe->user_data);
	}

	/*
	 * The buffer register op is IOSQE_CQE_SKIP_SUCCESS, so its CQE was
	 * not counted when the chain was queued; count it now that it has
	 * actually posted.
	 */
	if (op == ublk_cmd_op_nr(UBLK_U_IO_REGISTER_IO_BUF))
		io->tgt_ios += 1;

	if (ublk_completed_tgt_io(q, tag))
		ublk_complete_io(q, tag, io->result);
}

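/*
 * Size the device from the single backing file and export 512-byte
 * logical / 4KB physical blocks with a volatile write cache, so flush
 * requests are issued and forwarded to the backing file as fsync.
 */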
static int ublk_loop_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
{
	unsigned long long bytes;
	int ret;
	struct ublk_params p = {
		.types = UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DMA_ALIGN,
		.basic = {
			.attrs = UBLK_ATTR_VOLATILE_CACHE,
			.logical_bs_shift = 9,
			.physical_bs_shift = 12,
			.io_opt_shift = 12,
			.io_min_shift = 9,
			.max_sectors = dev->dev_info.max_io_buf_bytes >> 9,
		},
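		/* alignment is a mask: 511 advertises 512-byte DMA alignment */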
		.dma = {
			.alignment = 511,
		},
	};

	ret = backing_file_tgt_init(dev);
	if (ret)
		return ret;

	if (dev->tgt.nr_backing_files != 1)
		return -EINVAL;

	bytes = dev->tgt.backing_file_size[0];
	dev->tgt.dev_size = bytes;
	p.basic.dev_sectors = bytes >> 9;
	dev->tgt.params = p;

	return 0;
}

const struct ublk_tgt_ops loop_tgt_ops = {
	.name = "loop",
	.init_tgt = ublk_loop_tgt_init,
	.deinit_tgt = backing_file_tgt_deinit,
	.queue_io = ublk_loop_queue_io,
	.tgt_io_done = ublk_loop_io_done,
};