/* SPDX-License-Identifier: GPL-2.0 */

#include "kublk.h"

#ifndef IORING_NOP_INJECT_RESULT
#define IORING_NOP_INJECT_RESULT	(1U << 0)
#endif

#ifndef IORING_NOP_FIXED_BUFFER
#define IORING_NOP_FIXED_BUFFER		(1U << 3)
#endif

static int ublk_null_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
{
	const struct ublksrv_ctrl_dev_info *info = &dev->dev_info;
	unsigned long dev_size = 250UL << 30;

	dev->tgt.dev_size = dev_size;
	dev->tgt.params = (struct ublk_params) {
		.types = UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DMA_ALIGN |
			UBLK_PARAM_TYPE_SEGMENT,
		.basic = {
			.logical_bs_shift	= 9,
			.physical_bs_shift	= 12,
			.io_opt_shift		= 12,
			.io_min_shift		= 9,
			.max_sectors		= info->max_io_buf_bytes >> 9,
			.dev_sectors		= dev_size >> 9,
		},
		.dma = {
			.alignment		= 4095,
		},
		.seg = {
			.seg_boundary_mask	= 4095,
			.max_segment_size	= 32 << 10,
			.max_segments		= 32,
		},
	};

	/* zero copy needs extra buffer register/unregister SQEs per io, so double the ring depth */
	if (info->flags & UBLK_F_SUPPORT_ZERO_COPY)
		dev->tgt.sq_depth = dev->tgt.cq_depth = 2 * info->queue_depth;
	return 0;
}

/* prepare a NOP SQE which consumes the fixed buffer and injects the io size as its CQE result */
static void __setup_nop_io(int tag, const struct ublksrv_io_desc *iod,
		struct io_uring_sqe *sqe, int q_id)
{
	unsigned ublk_op = ublksrv_get_op(iod);

	io_uring_prep_nop(sqe);
	sqe->buf_index = tag;
	sqe->flags |= IOSQE_FIXED_FILE;
	sqe->rw_flags = IORING_NOP_FIXED_BUFFER | IORING_NOP_INJECT_RESULT;
	sqe->len = iod->nr_sectors << 9; 	/* injected result */
	sqe->user_data = build_user_data(tag, ublk_op, 0, q_id, 1);
}

static int null_queue_zc_io(struct ublk_queue *q, int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	struct io_uring_sqe *sqe[3];

	ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 3);

	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
	sqe[0]->user_data = build_user_data(tag,
			ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
	sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;

	__setup_nop_io(tag, iod, sqe[1], q->q_id);
	sqe[1]->flags |= IOSQE_IO_HARDLINK;

	io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, ublk_get_io(q, tag)->buf_index);
	sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);

	/* buf register is marked as IOSQE_CQE_SKIP_SUCCESS, so only two CQEs are expected */
	return 2;
}

static int null_queue_auto_zc_io(struct ublk_queue *q, int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	struct io_uring_sqe *sqe[1];

	ublk_io_alloc_sqes(ublk_get_io(q, tag), sqe, 1);
	__setup_nop_io(tag, iod, sqe[0], q->q_id);
	return 1;
}

static void ublk_null_io_done(struct ublk_queue *q, int tag,
		const struct io_uring_cqe *cqe)
{
	unsigned op = user_data_to_op(cqe->user_data);
	struct ublk_io *io = ublk_get_io(q, tag);

	if (cqe->res < 0 || op != ublk_cmd_op_nr(UBLK_U_IO_UNREGISTER_IO_BUF)) {
		if (!io->result)
			io->result = cqe->res;
		if (cqe->res < 0)
			ublk_err("%s: io failed op %x user_data %lx\n",
					__func__, op, cqe->user_data);
	}

	/* buffer register op is IOSQE_CQE_SKIP_SUCCESS */
	if (op == ublk_cmd_op_nr(UBLK_U_IO_REGISTER_IO_BUF))
		io->tgt_ios += 1;

	if (ublk_completed_tgt_io(q, tag))
		ublk_complete_io(q, tag, io->result);
}

static int ublk_null_queue_io(struct ublk_queue *q, int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	unsigned auto_zc = ublk_queue_use_auto_zc(q);
	unsigned zc = ublk_queue_use_zc(q);
	int queued;

	if (auto_zc && !ublk_io_auto_zc_fallback(iod))
		queued = null_queue_auto_zc_io(q, tag);
	else if (zc)
		queued = null_queue_zc_io(q, tag);
	else {
		ublk_complete_io(q, tag, iod->nr_sectors << 9);
		return 0;
	}
	ublk_queued_tgt_io(q, tag, queued);
	return 0;
}

/*
 * Return an invalid buffer index to trigger auto buffer register failure,
 * so that UBLK_IO_RES_NEED_REG_BUF handling is covered.
 */
static unsigned short ublk_null_buf_index(const struct ublk_queue *q, int tag)
{
	if (q->state & UBLKSRV_AUTO_BUF_REG_FALLBACK)
		return (unsigned short)-1;
	return q->ios[tag].buf_index;
}

const struct ublk_tgt_ops null_tgt_ops = {
	.name = "null",
	.init_tgt = ublk_null_tgt_init,
	.queue_io = ublk_null_queue_io,
	.tgt_io_done = ublk_null_io_done,
	.buf_index = ublk_null_buf_index,
};