/* SPDX-License-Identifier: GPL-2.0 */

#include "kublk.h"

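/*
 * Fallback definitions for the io_uring NOP flags, in case the liburing
 * headers being built against predate them.
 */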
#ifndef IORING_NOP_INJECT_RESULT
#define IORING_NOP_INJECT_RESULT (1U << 0)
#endif

#ifndef IORING_NOP_FIXED_BUFFER
#define IORING_NOP_FIXED_BUFFER (1U << 3)
#endif

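/*
 * Set up the null target: a 250 GiB device with 512-byte logical blocks,
 * 4 KiB physical/optimal blocks, 4 KiB DMA alignment (mask 4095), and
 * 4 KiB segment boundaries with at most 32 segments of 32 KiB each.
 */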
static int ublk_null_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
{
	const struct ublksrv_ctrl_dev_info *info = &dev->dev_info;
	unsigned long dev_size = 250UL << 30;

	dev->tgt.dev_size = dev_size;
	dev->tgt.params = (struct ublk_params) {
		.types = UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DMA_ALIGN |
			UBLK_PARAM_TYPE_SEGMENT,
		.basic = {
			.logical_bs_shift = 9,
			.physical_bs_shift = 12,
			.io_opt_shift = 12,
			.io_min_shift = 9,
			.max_sectors = info->max_io_buf_bytes >> 9,
			.dev_sectors = dev_size >> 9,
		},
		.dma = {
			.alignment = 4095,
		},
		.seg = {
			.seg_boundary_mask = 4095,
			.max_segment_size = 32 << 10,
			.max_segments = 32,
		},
	};

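	/*
	 * Zero-copy submits a three-SQE chain per IO (buf register, NOP,
	 * buf unregister), so give the rings twice the queue depth.
	 */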
	if (info->flags & UBLK_F_SUPPORT_ZERO_COPY)
		dev->tgt.sq_depth = dev->tgt.cq_depth = 2 * info->queue_depth;
	return 0;
}

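/*
 * Queue the zero-copy chain for one tag: register the request buffer as a
 * fixed buffer, touch it with a NOP that consumes the fixed buffer and
 * injects the IO byte count as its result, then unregister the buffer.
 * The three SQEs are hardlinked so they run in order even after a failure.
 */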
static int null_queue_zc_io(struct ublk_queue *q, int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	unsigned ublk_op = ublksrv_get_op(iod);
	struct io_uring_sqe *sqe[3];

	ublk_queue_alloc_sqes(q, sqe, 3);

	io_uring_prep_buf_register(sqe[0], 0, tag, q->q_id, tag);
	sqe[0]->user_data = build_user_data(tag,
			ublk_cmd_op_nr(sqe[0]->cmd_op), 0, 1);
	sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;

	io_uring_prep_nop(sqe[1]);
	sqe[1]->buf_index = tag;
	sqe[1]->flags |= IOSQE_FIXED_FILE | IOSQE_IO_HARDLINK;
	sqe[1]->rw_flags = IORING_NOP_FIXED_BUFFER | IORING_NOP_INJECT_RESULT;
	sqe[1]->len = iod->nr_sectors << 9; /* injected result */
	sqe[1]->user_data = build_user_data(tag, ublk_op, 0, 1);

	io_uring_prep_buf_unregister(sqe[2], 0, tag, q->q_id, tag);
	sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, 1);

	/* buf register is marked as IOSQE_CQE_SKIP_SUCCESS */
	return 2;
}

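/*
 * Per-CQE completion for the zero-copy chain: record the first failure or
 * the NOP's injected byte count, count the buf register CQE as an extra
 * target IO when it does arrive (it is suppressed on success and thus not
 * part of the queued count), and complete the ublk IO once every expected
 * target CQE has been seen.
 */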
static void ublk_null_io_done(struct ublk_queue *q, int tag,
			      const struct io_uring_cqe *cqe)
{
	unsigned op = user_data_to_op(cqe->user_data);
	struct ublk_io *io = ublk_get_io(q, tag);

	if (cqe->res < 0 || op != ublk_cmd_op_nr(UBLK_U_IO_UNREGISTER_IO_BUF)) {
		if (!io->result)
			io->result = cqe->res;
		if (cqe->res < 0)
			ublk_err("%s: io failed op %x user_data %lx\n",
					__func__, op, cqe->user_data);
	}

	/* buffer register op is IOSQE_CQE_SKIP_SUCCESS */
	if (op == ublk_cmd_op_nr(UBLK_U_IO_REGISTER_IO_BUF))
		io->tgt_ios += 1;

	if (ublk_completed_tgt_io(q, tag))
		ublk_complete_io(q, tag, io->result);
}

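/*
 * Dispatch one IO: without zero-copy the null target completes it
 * immediately with the full byte count; with zero-copy it queues the
 * register/NOP/unregister chain and defers completion to the CQE handler.
 */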
static int ublk_null_queue_io(struct ublk_queue *q, int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	int zc = ublk_queue_use_zc(q);
	int queued;

	if (!zc) {
		ublk_complete_io(q, tag, iod->nr_sectors << 9);
		return 0;
	}

	queued = null_queue_zc_io(q, tag);
	ublk_queued_tgt_io(q, tag, queued);
	return 0;
}

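/*
 * Ops table for the "null" target; kublk selects it by the .name field
 * (e.g. adding a device with "-t null" in the ublk selftests).
 */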
const struct ublk_tgt_ops null_tgt_ops = {
	.name = "null",
	.init_tgt = ublk_null_tgt_init,
	.queue_io = ublk_null_queue_io,
	.tgt_io_done = ublk_null_io_done,
};