/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Null ublk target: completes every I/O without touching data, optionally
 * exercising the zero-copy buffer register/unregister path.
 */

#include "kublk.h"

/* Fallbacks in case the io_uring headers in use don't define the nop flags */
#ifndef IORING_NOP_INJECT_RESULT
#define IORING_NOP_INJECT_RESULT	(1U << 0)
#endif

#ifndef IORING_NOP_FIXED_BUFFER
#define IORING_NOP_FIXED_BUFFER		(1U << 3)
#endif

static int ublk_null_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
{
	const struct ublksrv_ctrl_dev_info *info = &dev->dev_info;
	unsigned long dev_size = 250UL << 30;	/* 250 GiB nominal capacity */

	dev->tgt.dev_size = dev_size;
	dev->tgt.params = (struct ublk_params) {
		.types = UBLK_PARAM_TYPE_BASIC,
		.basic = {
			.logical_bs_shift	= 9,	/* 512-byte logical blocks */
			.physical_bs_shift	= 12,	/* 4 KiB physical blocks */
			.io_opt_shift		= 12,
			.io_min_shift		= 9,
			.max_sectors		= info->max_io_buf_bytes >> 9,
			.dev_sectors		= dev_size >> 9,
		},
	};

	/*
	 * Zero-copy issues extra buffer register/unregister SQEs per I/O,
	 * so size the io_uring rings beyond the ublk queue depth.
	 */
	if (info->flags & UBLK_F_SUPPORT_ZERO_COPY)
		dev->tgt.sq_depth = dev->tgt.cq_depth = 2 * info->queue_depth;
	return 0;
}

/*
 * Queue a zero-copy "I/O": register the request's buffer, run a nop that
 * consumes it and injects the transfer size as its result, then unregister
 * the buffer. The SQEs are hardlinked so they execute in order even if an
 * earlier one fails.
 */
static int null_queue_zc_io(struct ublk_queue *q, int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	unsigned ublk_op = ublksrv_get_op(iod);
	struct io_uring_sqe *sqe[3];

	ublk_queue_alloc_sqes(q, sqe, 3);

	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
	sqe[0]->user_data = build_user_data(tag,
			ublk_cmd_op_nr(sqe[0]->cmd_op), 0, 1);
	sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;

	io_uring_prep_nop(sqe[1]);
	sqe[1]->buf_index = tag;
	sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK;
	sqe[1]->rw_flags = IORING_NOP_FIXED_BUFFER | IORING_NOP_INJECT_RESULT;
	sqe[1]->len = iod->nr_sectors << 9;	/* injected result */
	sqe[1]->user_data = build_user_data(tag, ublk_op, 0, 1);

	io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, tag);
	sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, 1);

	/* only two CQEs expected: buf register is IOSQE_CQE_SKIP_SUCCESS */
	return 2;
}

static void ublk_null_io_done(struct ublk_queue *q, int tag,
			      const struct io_uring_cqe *cqe)
{
	unsigned op = user_data_to_op(cqe->user_data);
	struct ublk_io *io = ublk_get_io(q, tag);

	/*
	 * Record the first meaningful result: any failure, or the nop's
	 * injected byte count. A successful unregister CQE is ignored.
	 */
	if (cqe->res < 0 || op != ublk_cmd_op_nr(UBLK_U_IO_UNREGISTER_IO_BUF)) {
		if (!io->result)
			io->result = cqe->res;
		if (cqe->res < 0)
			ublk_err("%s: io failed op %x user_data %lx\n",
					__func__, op, cqe->user_data);
	}

	/*
	 * The register op is IOSQE_CQE_SKIP_SUCCESS, so its CQE only shows
	 * up on failure; count it on top of the two expected completions.
	 */
	if (op == ublk_cmd_op_nr(UBLK_U_IO_REGISTER_IO_BUF))
		io->tgt_ios += 1;

	if (ublk_completed_tgt_io(q, tag))
		ublk_complete_io(q, tag, io->result);
}

static int ublk_null_queue_io(struct ublk_queue *q, int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	int zc = ublk_queue_use_zc(q);
	int queued;

	/* without zero copy there is nothing to do: complete right away */
	if (!zc) {
		ublk_complete_io(q, tag, iod->nr_sectors << 9);
		return 0;
	}

	queued = null_queue_zc_io(q, tag);
	ublk_queued_tgt_io(q, tag, queued);
	return 0;
}

const struct ublk_tgt_ops null_tgt_ops = {
	.name = "null",
	.init_tgt = ublk_null_tgt_init,
	.queue_io = ublk_null_queue_io,
	.tgt_io_done = ublk_null_io_done,
};
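
/*
 * Usage sketch (an assumption based on the kublk selftest CLI in this
 * directory, not part of this file): the test scripts create a null
 * target roughly like so, with -z requesting UBLK_F_SUPPORT_ZERO_COPY:
 *
 *   ./kublk add -t null        # plain null target, no zero copy
 *   ./kublk add -t null -z     # zero-copy register/nop/unregister path
 */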