// SPDX-License-Identifier: GPL-2.0

#include "kublk.h"
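
/*
 * Pick the io_uring opcode for a ublk READ/WRITE request. With
 * zero-copy enabled the fixed-buffer variants are used so the rw SQE
 * consumes the registered ublk request buffer directly; any other ublk
 * op here is a caller bug.
 */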
static enum io_uring_op ublk_to_uring_op(const struct ublksrv_io_desc *iod, int zc)
{
	unsigned ublk_op = ublksrv_get_op(iod);

	if (ublk_op == UBLK_IO_OP_READ)
		return zc ? IORING_OP_READ_FIXED : IORING_OP_READ;
	else if (ublk_op == UBLK_IO_OP_WRITE)
		return zc ? IORING_OP_WRITE_FIXED : IORING_OP_WRITE;
	assert(0);
}
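
/*
 * UBLK_IO_OP_FLUSH: queue a single fsync SQE against the backing file
 * (registered fixed file index 1); IORING_FSYNC_DATASYNC is sufficient
 * since only the file data needs to reach stable storage.
 */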
static int loop_queue_flush_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
{
	unsigned ublk_op = ublksrv_get_op(iod);
	struct io_uring_sqe *sqe[1];

	ublk_queue_alloc_sqes(q, sqe, 1);
	io_uring_prep_fsync(sqe[0], 1 /*fds[1]*/, IORING_FSYNC_DATASYNC);
	io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
	/* bit63 marks us as tgt io */
	sqe[0]->user_data = build_user_data(tag, ublk_op, 0, 1);
	return 1;
}
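
/*
 * Queue a READ/WRITE against the backing file. Without zero-copy a
 * single rw SQE operates directly on the ublk I/O buffer at iod->addr.
 * With zero-copy a hard-linked three-SQE chain is used instead:
 * register the ublk request buffer, do the fixed-buffer rw, then
 * unregister the buffer.
 */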
static int loop_queue_tgt_rw_io(struct ublk_queue *q, const struct ublksrv_io_desc *iod, int tag)
{
	unsigned ublk_op = ublksrv_get_op(iod);
	int zc = ublk_queue_use_zc(q);
	enum io_uring_op op = ublk_to_uring_op(iod, zc);
	struct io_uring_sqe *sqe[3];

	if (!zc) {
		ublk_queue_alloc_sqes(q, sqe, 1);
		if (!sqe[0])
			return -ENOMEM;

		io_uring_prep_rw(op, sqe[0], 1 /*fds[1]*/,
				(void *)iod->addr,
				iod->nr_sectors << 9,
				iod->start_sector << 9);
		io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
		/* bit63 marks us as tgt io */
		sqe[0]->user_data = build_user_data(tag, ublk_op, 0, 1);
		return 1;
	}
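
	/*
	 * Zero-copy path: three hard-linked SQEs. The first registers the
	 * ublk request buffer as fixed buffer index 'tag', the second does
	 * the actual rw through that fixed buffer, and the third unregisters
	 * it. The register SQE uses IOSQE_CQE_SKIP_SUCCESS, so only two
	 * completions are expected on success, hence the return value of 2.
	 */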
	ublk_queue_alloc_sqes(q, sqe, 3);

	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
	sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
	sqe[0]->user_data = build_user_data(tag,
			ublk_cmd_op_nr(sqe[0]->cmd_op), 0, 1);

	io_uring_prep_rw(op, sqe[1], 1 /*fds[1]*/, 0,
			iod->nr_sectors << 9,
			iod->start_sector << 9);
	sqe[1]->buf_index = tag;
	sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK;
	sqe[1]->user_data = build_user_data(tag, ublk_op, 0, 1);

	io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, tag);
	sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, 1);

	return 2;
}
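
/*
 * Dispatch one ublk I/O descriptor to the matching helper above.
 * DISCARD and WRITE_ZEROES are not implemented by this target and fail
 * with -ENOTSUP. Returns the number of target CQEs to expect, or a
 * negative error code.
 */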
static int loop_queue_tgt_io(struct ublk_queue *q, int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	unsigned ublk_op = ublksrv_get_op(iod);
	int ret;

	switch (ublk_op) {
	case UBLK_IO_OP_FLUSH:
		ret = loop_queue_flush_io(q, iod, tag);
		break;
	case UBLK_IO_OP_WRITE_ZEROES:
	case UBLK_IO_OP_DISCARD:
		ret = -ENOTSUP;
		break;
	case UBLK_IO_OP_READ:
	case UBLK_IO_OP_WRITE:
		ret = loop_queue_tgt_rw_io(q, iod, tag);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	ublk_dbg(UBLK_DBG_IO, "%s: tag %d ublk io %x %llx %u\n", __func__, tag,
			iod->op_flags, iod->start_sector, iod->nr_sectors << 9);
	return ret;
}
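
/*
 * ->queue_io() callback: build the target SQEs for this tag and pass
 * the queued count (or error) to ublk_queued_tgt_io() so the generic
 * kublk code knows how many target completions to expect.
 */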
static int ublk_loop_queue_io(struct ublk_queue *q, int tag)
{
	int queued = loop_queue_tgt_io(q, tag);

	ublk_queued_tgt_io(q, tag, queued);
	return 0;
}
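
/*
 * ->tgt_io_done() callback: one CQE of the target SQE chain has
 * completed. Record the first meaningful result (a failure, or the
 * register/rw result) in io->result; a CQE for the buffer register
 * command only shows up on error because that SQE is queued with
 * IOSQE_CQE_SKIP_SUCCESS, so bump the completion count to match. The
 * ublk request is completed once all expected target CQEs have arrived.
 */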
static void ublk_loop_io_done(struct ublk_queue *q, int tag,
			      const struct io_uring_cqe *cqe)
{
	unsigned op = user_data_to_op(cqe->user_data);
	struct ublk_io *io = ublk_get_io(q, tag);

	if (cqe->res < 0 || op != ublk_cmd_op_nr(UBLK_U_IO_UNREGISTER_IO_BUF)) {
		if (!io->result)
			io->result = cqe->res;
		if (cqe->res < 0)
			ublk_err("%s: io failed op %x user_data %lx\n",
					__func__, op, cqe->user_data);
	}

	/* buffer register op is IOSQE_CQE_SKIP_SUCCESS */
	if (op == ublk_cmd_op_nr(UBLK_U_IO_REGISTER_IO_BUF))
		io->tgt_ios += 1;

	if (ublk_completed_tgt_io(q, tag))
		ublk_complete_io(q, tag, io->result);
}
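
/*
 * ->init_tgt() callback: set up the backing file via
 * backing_file_tgt_init(), require exactly one backing file, and
 * derive the ublk parameters from it: 512-byte logical blocks, 4KB
 * physical/optimal I/O size, a volatile write cache, and a device size
 * equal to the backing file size.
 */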
static int ublk_loop_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
{
	unsigned long long bytes;
	int ret;
	struct ublk_params p = {
		.types = UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DMA_ALIGN,
		.basic = {
			.attrs = UBLK_ATTR_VOLATILE_CACHE,
			.logical_bs_shift = 9,
			.physical_bs_shift = 12,
			.io_opt_shift = 12,
			.io_min_shift = 9,
			.max_sectors = dev->dev_info.max_io_buf_bytes >> 9,
		},
		.dma = {
			.alignment = 511,
		},
	};

	ret = backing_file_tgt_init(dev);
	if (ret)
		return ret;

	if (dev->tgt.nr_backing_files != 1)
		return -EINVAL;

	bytes = dev->tgt.backing_file_size[0];
	dev->tgt.dev_size = bytes;
	p.basic.dev_sectors = bytes >> 9;
	dev->tgt.params = p;

	return 0;
}
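
/*
 * The "loop" target: a simple file-backed ublk device wired into the
 * kublk test framework through these callbacks.
 */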
const struct ublk_tgt_ops loop_tgt_ops = {
	.name = "loop",
	.init_tgt = ublk_loop_tgt_init,
	.deinit_tgt = backing_file_tgt_deinit,
	.queue_io = ublk_loop_queue_io,
	.tgt_io_done = ublk_loop_io_done,
};