// SPDX-License-Identifier: GPL-2.0

#include "kublk.h"

static enum io_uring_op ublk_to_uring_op(const struct ublksrv_io_desc *iod, int zc)
{
	unsigned ublk_op = ublksrv_get_op(iod);

	if (ublk_op == UBLK_IO_OP_READ)
		return zc ? IORING_OP_READ_FIXED : IORING_OP_READ;
	else if (ublk_op == UBLK_IO_OP_WRITE)
		return zc ? IORING_OP_WRITE_FIXED : IORING_OP_WRITE;
	/* only READ/WRITE reach this helper; anything else is a caller bug */
	assert(0);
}

static int loop_queue_flush_io(struct ublk_thread *t, struct ublk_queue *q,
			       const struct ublksrv_io_desc *iod, int tag)
{
	unsigned ublk_op = ublksrv_get_op(iod);
	struct io_uring_sqe *sqe[1];

	ublk_io_alloc_sqes(t, sqe, 1);
	io_uring_prep_fsync(sqe[0], ublk_get_registered_fd(q, 1) /*fds[1]*/,
			    IORING_FSYNC_DATASYNC);
	io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
	/* bit63 marks us as tgt io */
	sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
	return 1;
}

static int loop_queue_tgt_rw_io(struct ublk_thread *t, struct ublk_queue *q,
				const struct ublksrv_io_desc *iod, int tag)
{
	unsigned ublk_op = ublksrv_get_op(iod);
	unsigned zc = ublk_queue_use_zc(q);
	unsigned auto_zc = ublk_queue_use_auto_zc(q);
	enum io_uring_op op = ublk_to_uring_op(iod, zc | auto_zc);
	struct ublk_io *io = ublk_get_io(q, tag);
	struct io_uring_sqe *sqe[3];
	void *addr = io->buf_addr;

	if (!zc || auto_zc) {
		ublk_io_alloc_sqes(t, sqe, 1);
		if (!sqe[0])
			return -ENOMEM;

		io_uring_prep_rw(op, sqe[0], ublk_get_registered_fd(q, 1) /*fds[1]*/,
				 addr,
				 iod->nr_sectors << 9,
				 iod->start_sector << 9);
		if (auto_zc)
			sqe[0]->buf_index = tag;
		io_uring_sqe_set_flags(sqe[0], IOSQE_FIXED_FILE);
		/* bit63 marks us as tgt io */
		sqe[0]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);
		return 1;
	}

	/*
	 * Zero-copy: hard-link buffer register -> fixed-buffer rw -> buffer
	 * unregister so the three SQEs execute in order even on failure.
	 */
	ublk_io_alloc_sqes(t, sqe, 3);

	io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, io->buf_index);
	sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;
	sqe[0]->user_data = build_user_data(tag,
			ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);

	io_uring_prep_rw(op, sqe[1], ublk_get_registered_fd(q, 1) /*fds[1]*/, 0,
			 iod->nr_sectors << 9,
			 iod->start_sector << 9);
	sqe[1]->buf_index = tag;
	sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK;
	sqe[1]->user_data = build_user_data(tag, ublk_op, 0, q->q_id, 1);

	io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, io->buf_index);
	sqe[2]->user_data = build_user_data(tag,
			ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);

	/* the register CQE is skipped on success, so only two CQEs are expected */
	return 2;
}

static int loop_queue_tgt_io(struct ublk_thread *t, struct ublk_queue *q, int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	unsigned ublk_op = ublksrv_get_op(iod);
	int ret;

	switch (ublk_op) {
	case UBLK_IO_OP_FLUSH:
		ret = loop_queue_flush_io(t, q, iod, tag);
		break;
	case UBLK_IO_OP_WRITE_ZEROES:
	case UBLK_IO_OP_DISCARD:
		ret = -ENOTSUP;
		break;
	case UBLK_IO_OP_READ:
	case UBLK_IO_OP_WRITE:
		ret = loop_queue_tgt_rw_io(t, q, iod, tag);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	ublk_dbg(UBLK_DBG_IO, "%s: tag %d ublk io %x %llx %u\n", __func__, tag,
			iod->op_flags, iod->start_sector, iod->nr_sectors << 9);
	return ret;
}

static int ublk_loop_queue_io(struct ublk_thread *t, struct ublk_queue *q,
			      int tag)
{
	int queued = loop_queue_tgt_io(t, q, tag);

	ublk_queued_tgt_io(t, q, tag, queued);
	return 0;
}

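/*
 * Per-CQE completion for loop target I/O: record the first result (ignoring
 * successful buffer-unregister CQEs), log failures, count the extra CQE that
 * a failed buffer register produces (its success CQE is skipped), and
 * complete the ublk request once all expected target CQEs have arrived.
 */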
static void ublk_loop_io_done(struct ublk_thread *t, struct ublk_queue *q,
			      const struct io_uring_cqe *cqe)
{
	unsigned tag = user_data_to_tag(cqe->user_data);
	unsigned op = user_data_to_op(cqe->user_data);
	struct ublk_io *io = ublk_get_io(q, tag);

	if (cqe->res < 0 || op != ublk_cmd_op_nr(UBLK_U_IO_UNREGISTER_IO_BUF)) {
		if (!io->result)
			io->result = cqe->res;
		if (cqe->res < 0)
			ublk_err("%s: io failed op %x user_data %lx\n",
					__func__, op, cqe->user_data);
	}

	/* buffer register op is IOSQE_CQE_SKIP_SUCCESS */
	if (op == ublk_cmd_op_nr(UBLK_U_IO_REGISTER_IO_BUF))
		io->tgt_ios += 1;

	if (ublk_completed_tgt_io(t, q, tag))
		ublk_complete_io(t, q, tag, io->result);
}

static int ublk_loop_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
{
	unsigned long long bytes;
	int ret;
	struct ublk_params p = {
		.types = UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DMA_ALIGN,
		.basic = {
			.attrs = UBLK_ATTR_VOLATILE_CACHE,
			.logical_bs_shift	= 9,
			.physical_bs_shift	= 12,
			.io_opt_shift		= 12,
			.io_min_shift		= 9,
			.max_sectors = dev->dev_info.max_io_buf_bytes >> 9,
		},
		.dma = {
			/* a mask, not a size: 511 means 512-byte DMA alignment */
			.alignment = 511,
		},
	};

	if (ctx->auto_zc_fallback) {
		ublk_err("%s: auto_zc_fallback is not supported\n", __func__);
		return -EINVAL;
	}

	ret = backing_file_tgt_init(dev);
	if (ret)
		return ret;

	if (dev->tgt.nr_backing_files != 1)
		return -EINVAL;

	bytes = dev->tgt.backing_file_size[0];
	dev->tgt.dev_size = bytes;
	p.basic.dev_sectors = bytes >> 9;
	dev->tgt.params = p;

	return 0;
}

const struct ublk_tgt_ops loop_tgt_ops = {
	.name = "loop",
	.init_tgt = ublk_loop_tgt_init,
	.deinit_tgt = backing_file_tgt_deinit,
	.queue_io = ublk_loop_queue_io,
	.tgt_io_done = ublk_loop_io_done,
};
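
/*
 * Usage sketch (illustrative, not the authoritative CLI): this target is
 * selected by the "loop" name in loop_tgt_ops above. With the selftest
 * "kublk" tool built next to this file, a loop device backed by a regular
 * file is typically created with something like:
 *
 *	truncate -s 1G loop.img
 *	./kublk add -t loop loop.img
 *
 * The exact option syntax is defined by kublk.c and may differ from this
 * sketch.
 */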