xref: /linux/tools/testing/selftests/ublk/null.c (revision 37a93dd5c49b5fda807fd204edf2547c3493319c)
/* SPDX-License-Identifier: GPL-2.0 */

#include "kublk.h"

#ifndef IORING_NOP_INJECT_RESULT
#define IORING_NOP_INJECT_RESULT        (1U << 0)
#endif

#ifndef IORING_NOP_FIXED_BUFFER
#define IORING_NOP_FIXED_BUFFER         (1U << 3)
#endif

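/*
 * Set up the null target: advertise a 250GiB device with 512-byte logical
 * blocks, 4KB physical blocks, a 4KB DMA alignment mask and small segment
 * limits. With zero-copy enabled, the SQ/CQ depth is doubled to leave room
 * for the per-I/O buffer register/unregister commands.
 */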
static int ublk_null_tgt_init(const struct dev_ctx *ctx, struct ublk_dev *dev)
{
	const struct ublksrv_ctrl_dev_info *info = &dev->dev_info;
	unsigned long dev_size = 250UL << 30;

	dev->tgt.dev_size = dev_size;
	dev->tgt.params = (struct ublk_params) {
		.types = UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DMA_ALIGN |
			UBLK_PARAM_TYPE_SEGMENT,
		.basic = {
			.logical_bs_shift	= 9,
			.physical_bs_shift	= 12,
			.io_opt_shift		= 12,
			.io_min_shift		= 9,
			.max_sectors		= info->max_io_buf_bytes >> 9,
			.dev_sectors		= dev_size >> 9,
		},
		.dma = {
			.alignment		= 4095,
		},
		.seg = {
			.seg_boundary_mask	= 4095,
			.max_segment_size	= 32 << 10,
			.max_segments		= 32,
		},
	};
	ublk_set_integrity_params(ctx, &dev->tgt.params);

	if (info->flags & UBLK_F_SUPPORT_ZERO_COPY)
		dev->tgt.sq_depth = dev->tgt.cq_depth = 2 * info->queue_depth;
	return 0;
}

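/*
 * Build a no-op SQE that stands in for the actual data transfer:
 * IORING_NOP_FIXED_BUFFER makes the nop use the registered buffer at
 * @buf_idx, and IORING_NOP_INJECT_RESULT makes its CQE report sqe->len
 * (the request length) as the result.
 */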
static void __setup_nop_io(int tag, const struct ublksrv_io_desc *iod,
		struct io_uring_sqe *sqe, int q_id, unsigned buf_idx)
{
	unsigned ublk_op = ublksrv_get_op(iod);

	io_uring_prep_nop(sqe);
	sqe->buf_index = buf_idx;
	sqe->flags |= IOSQE_FIXED_FILE;
	sqe->rw_flags = IORING_NOP_FIXED_BUFFER | IORING_NOP_INJECT_RESULT;
	sqe->len = iod->nr_sectors << 9;	/* injected result */
	sqe->user_data = build_user_data(tag, ublk_op, 0, q_id, 1);
}

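/*
 * Zero-copy path: hardlink a buffer register command, the nop "I/O" and a
 * buffer unregister command into one chain for this tag.
 */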
static int null_queue_zc_io(struct ublk_thread *t, struct ublk_queue *q,
			    int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	struct io_uring_sqe *sqe[3];
	unsigned short buf_idx = ublk_io_buf_idx(t, q, tag);

	ublk_io_alloc_sqes(t, sqe, 3);

	io_uring_prep_buf_register(sqe[0], q, tag, q->q_id, buf_idx);
	sqe[0]->user_data = build_user_data(tag,
			ublk_cmd_op_nr(sqe[0]->cmd_op), 0, q->q_id, 1);
	sqe[0]->flags |= IOSQE_CQE_SKIP_SUCCESS | IOSQE_IO_HARDLINK;

	__setup_nop_io(tag, iod, sqe[1], q->q_id, buf_idx);
	sqe[1]->flags |= IOSQE_IO_HARDLINK;

	io_uring_prep_buf_unregister(sqe[2], q, tag, q->q_id, buf_idx);
	sqe[2]->user_data = build_user_data(tag, ublk_cmd_op_nr(sqe[2]->cmd_op), 0, q->q_id, 1);

	/* buf register is marked IOSQE_CQE_SKIP_SUCCESS, so only two CQEs are expected */
	return 2;
}

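/*
 * Auto buffer register path: the buffer is registered by the driver, so a
 * single nop SQE referencing the auto-registered buffer index is enough.
 */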
static int null_queue_auto_zc_io(struct ublk_thread *t, struct ublk_queue *q,
				 int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	struct io_uring_sqe *sqe[1];

	ublk_io_alloc_sqes(t, sqe, 1);
	__setup_nop_io(tag, iod, sqe[0], q->q_id, ublk_io_buf_idx(t, q, tag));
	return 1;
}

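/*
 * Handle one target CQE: record the first relevant result (successful
 * buffer unregister completions are ignored), log failures, account for
 * the register SQE whose CQE is skipped on success, and complete the ublk
 * I/O once every expected completion has arrived.
 */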
static void ublk_null_io_done(struct ublk_thread *t, struct ublk_queue *q,
			      const struct io_uring_cqe *cqe)
{
	unsigned tag = user_data_to_tag(cqe->user_data);
	unsigned op = user_data_to_op(cqe->user_data);
	struct ublk_io *io = ublk_get_io(q, tag);

	if (cqe->res < 0 || op != ublk_cmd_op_nr(UBLK_U_IO_UNREGISTER_IO_BUF)) {
		if (!io->result)
			io->result = cqe->res;
		if (cqe->res < 0)
			ublk_err("%s: io failed op %x user_data %lx\n",
					__func__, op, cqe->user_data);
	}

	/* buffer register op is IOSQE_CQE_SKIP_SUCCESS */
	if (op == ublk_cmd_op_nr(UBLK_U_IO_REGISTER_IO_BUF))
		io->tgt_ios += 1;

	if (ublk_completed_tgt_io(t, q, tag))
		ublk_complete_io(t, q, tag, io->result);
}

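/*
 * Queue one ublk I/O: take the auto buffer register or zero-copy path when
 * enabled, otherwise complete the request immediately with its full length
 * since a null target transfers no data.
 */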
static int ublk_null_queue_io(struct ublk_thread *t, struct ublk_queue *q,
			      int tag)
{
	const struct ublksrv_io_desc *iod = ublk_get_iod(q, tag);
	unsigned auto_zc = ublk_queue_use_auto_zc(q);
	unsigned zc = ublk_queue_use_zc(q);
	int queued;

	if (auto_zc && !ublk_io_auto_zc_fallback(iod))
		queued = null_queue_auto_zc_io(t, q, tag);
	else if (zc)
		queued = null_queue_zc_io(t, q, tag);
	else {
		ublk_complete_io(t, q, tag, iod->nr_sectors << 9);
		return 0;
	}
	ublk_queued_tgt_io(t, q, tag, queued);
	return 0;
}

/*
 * Return an invalid buffer index to trigger auto buffer register failure,
 * so that UBLK_IO_RES_NEED_REG_BUF handling is covered.
 */
static unsigned short ublk_null_buf_index(const struct ublk_thread *t,
		const struct ublk_queue *q, int tag)
{
	if (ublk_queue_auto_zc_fallback(q))
		return (unsigned short)-1;
	return ublk_io_buf_idx(t, q, tag);
}

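/* Target callbacks for the "null" backend of the kublk test harness */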
const struct ublk_tgt_ops null_tgt_ops = {
	.name = "null",
	.init_tgt = ublk_null_tgt_init,
	.queue_io = ublk_null_queue_io,
	.tgt_io_done = ublk_null_io_done,
	.buf_index = ublk_null_buf_index,
};