// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA simulator for block device.
 *
 * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
 * Copyright (c) 2021, Red Hat Inc. All rights reserved.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <uapi/linux/virtio_blk.h>

#include "vdpa_sim.h"

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Max Gurtovoy <mgurtovoy@nvidia.com>"
#define DRV_DESC     "vDPA Device Simulator for block device"
#define DRV_LICENSE  "GPL v2"

#define VDPASIM_BLK_FEATURES	(VDPASIM_FEATURES | \
				 (1ULL << VIRTIO_BLK_F_SIZE_MAX) | \
				 (1ULL << VIRTIO_BLK_F_SEG_MAX)  | \
				 (1ULL << VIRTIO_BLK_F_BLK_SIZE) | \
				 (1ULL << VIRTIO_BLK_F_TOPOLOGY) | \
				 (1ULL << VIRTIO_BLK_F_MQ))

#define VDPASIM_BLK_CAPACITY	0x40000
#define VDPASIM_BLK_SIZE_MAX	0x1000
#define VDPASIM_BLK_SEG_MAX	32
#define VDPASIM_BLK_VQ_NUM	1
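
/*
 * Sizing notes (derived from the constants above): VDPASIM_BLK_CAPACITY is
 * expressed in 512-byte sectors, so 0x40000 sectors give a 128 MiB backing
 * buffer (see dev_attr.buffer_size below), and the largest request the
 * simulator accepts is VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX = 128 KiB.
 */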

static char vdpasim_blk_id[VIRTIO_BLK_ID_BYTES] = "vdpa_blk_sim";

static bool vdpasim_blk_check_range(u64 start_sector, size_t range_size)
{
	u64 range_sectors = range_size >> SECTOR_SHIFT;

	if (range_size > VDPASIM_BLK_SIZE_MAX * VDPASIM_BLK_SEG_MAX)
		return false;

	if (start_sector > VDPASIM_BLK_CAPACITY)
		return false;

	if (range_sectors > VDPASIM_BLK_CAPACITY - start_sector)
		return false;

	return true;
}
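
/*
 * Worked example (hypothetical values): a transfer of range_size 0x400
 * starting at sector 0x3ffff spans two sectors, but only one sector remains
 * before VDPASIM_BLK_CAPACITY, so vdpasim_blk_check_range() rejects it.
 */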

/* Returns 'true' if the request is handled (with or without an I/O error)
 * and the status is correctly written in the last byte of the 'in iov',
 * 'false' otherwise.
 */
static bool vdpasim_blk_handle_req(struct vdpasim *vdpasim,
				   struct vdpasim_virtqueue *vq)
{
	size_t pushed = 0, to_pull, to_push;
	struct virtio_blk_outhdr hdr;
	ssize_t bytes;
	loff_t offset;
	u64 sector;
	u8 status;
	u32 type;
	int ret;

	ret = vringh_getdesc_iotlb(&vq->vring, &vq->out_iov, &vq->in_iov,
				   &vq->head, GFP_ATOMIC);
	if (ret != 1)
		return false;

	if (vq->out_iov.used < 1 || vq->in_iov.used < 1) {
		dev_err(&vdpasim->vdpa.dev, "missing headers - out_iov: %u in_iov: %u\n",
			vq->out_iov.used, vq->in_iov.used);
		return false;
	}

	if (vq->in_iov.iov[vq->in_iov.used - 1].iov_len < 1) {
		dev_err(&vdpasim->vdpa.dev, "request in header too short\n");
		return false;
	}

	/* The last byte is the status and we checked that the last iov has
	 * enough room for it.
	 */
	to_push = vringh_kiov_length(&vq->in_iov) - 1;

	to_pull = vringh_kiov_length(&vq->out_iov);

	bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov, &hdr,
				      sizeof(hdr));
	if (bytes != sizeof(hdr)) {
		dev_err(&vdpasim->vdpa.dev, "request out header too short\n");
		return false;
	}

	to_pull -= bytes;

	type = vdpasim32_to_cpu(vdpasim, hdr.type);
	sector = vdpasim64_to_cpu(vdpasim, hdr.sector);
	offset = sector << SECTOR_SHIFT;
	status = VIRTIO_BLK_S_OK;

	switch (type) {
	case VIRTIO_BLK_T_IN:
		if (!vdpasim_blk_check_range(sector, to_push)) {
			dev_err(&vdpasim->vdpa.dev,
				"reading over the capacity - offset: 0x%llx len: 0x%zx\n",
				offset, to_push);
			status = VIRTIO_BLK_S_IOERR;
			break;
		}

		bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov,
					      vdpasim->buffer + offset,
					      to_push);
		if (bytes < 0) {
			dev_err(&vdpasim->vdpa.dev,
				"vringh_iov_push_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
				bytes, offset, to_push);
			status = VIRTIO_BLK_S_IOERR;
			break;
		}

		pushed += bytes;
		break;

	case VIRTIO_BLK_T_OUT:
		if (!vdpasim_blk_check_range(sector, to_pull)) {
			dev_err(&vdpasim->vdpa.dev,
				"writing over the capacity - offset: 0x%llx len: 0x%zx\n",
				offset, to_pull);
			status = VIRTIO_BLK_S_IOERR;
			break;
		}

		bytes = vringh_iov_pull_iotlb(&vq->vring, &vq->out_iov,
					      vdpasim->buffer + offset,
					      to_pull);
		if (bytes < 0) {
			dev_err(&vdpasim->vdpa.dev,
				"vringh_iov_pull_iotlb() error: %zd offset: 0x%llx len: 0x%zx\n",
				bytes, offset, to_pull);
			status = VIRTIO_BLK_S_IOERR;
			break;
		}
		break;

	case VIRTIO_BLK_T_GET_ID:
		bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov,
					      vdpasim_blk_id,
					      VIRTIO_BLK_ID_BYTES);
		if (bytes < 0) {
			dev_err(&vdpasim->vdpa.dev,
				"vringh_iov_push_iotlb() error: %zd\n", bytes);
			status = VIRTIO_BLK_S_IOERR;
			break;
		}

		pushed += bytes;
		break;

	default:
		dev_warn(&vdpasim->vdpa.dev,
			 "Unsupported request type %d\n", type);
		status = VIRTIO_BLK_S_IOERR;
		break;
	}

	/* If some operations fail, we need to skip the remaining bytes
	 * to write the status in the last byte.
	 */
	if (to_push - pushed > 0)
		vringh_kiov_advance(&vq->in_iov, to_push - pushed);

	/* Last byte is the status */
	bytes = vringh_iov_push_iotlb(&vq->vring, &vq->in_iov, &status, 1);
	if (bytes != 1)
		return false;

	pushed += bytes;

	/* Make sure data is written before advancing the used index */
	smp_wmb();

	vringh_complete_iotlb(&vq->vring, vq->head, pushed);

	return true;
}
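
/*
 * Drain the ready virtqueues; this work item is scheduled by the vdpa_sim
 * core when the driver kicks a virtqueue (via the work_fn set in
 * vdpasim_blk_dev_add() below).
 */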
static void vdpasim_blk_work(struct work_struct *work)
{
	struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
	int i;

	spin_lock(&vdpasim->lock);

	if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
		goto out;

	for (i = 0; i < VDPASIM_BLK_VQ_NUM; i++) {
		struct vdpasim_virtqueue *vq = &vdpasim->vqs[i];

		if (!vq->ready)
			continue;

		while (vdpasim_blk_handle_req(vdpasim, vq)) {
			/* Make sure used is visible before raising the interrupt. */
			smp_wmb();

			local_bh_disable();
			if (vringh_need_notify_iotlb(&vq->vring) > 0)
				vringh_notify(&vq->vring);
			local_bh_enable();
		}
	}
out:
	spin_unlock(&vdpasim->lock);
}

static void vdpasim_blk_get_config(struct vdpasim *vdpasim, void *config)
{
	struct virtio_blk_config *blk_config = config;

	memset(config, 0, sizeof(struct virtio_blk_config));

	blk_config->capacity = cpu_to_vdpasim64(vdpasim, VDPASIM_BLK_CAPACITY);
	blk_config->size_max = cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_SIZE_MAX);
	blk_config->seg_max = cpu_to_vdpasim32(vdpasim, VDPASIM_BLK_SEG_MAX);
	blk_config->num_queues = cpu_to_vdpasim16(vdpasim, VDPASIM_BLK_VQ_NUM);
	blk_config->min_io_size = cpu_to_vdpasim16(vdpasim, 1);
	blk_config->opt_io_size = cpu_to_vdpasim32(vdpasim, 1);
	blk_config->blk_size = cpu_to_vdpasim32(vdpasim, SECTOR_SIZE);
}
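
/*
 * Note: the cpu_to_vdpasim{16,32,64}() helpers from vdpa_sim.h convert to
 * the byte order negotiated with the driver, so the config space above is
 * presented in the endianness the guest expects.
 */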

static void vdpasim_blk_mgmtdev_release(struct device *dev)
{
}

static struct device vdpasim_blk_mgmtdev = {
	.init_name = "vdpasim_blk",
	.release = vdpasim_blk_mgmtdev_release,
};

static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
{
	struct vdpasim_dev_attr dev_attr = {};
	struct vdpasim *simdev;
	int ret;

	dev_attr.mgmt_dev = mdev;
	dev_attr.name = name;
	dev_attr.id = VIRTIO_ID_BLOCK;
	dev_attr.supported_features = VDPASIM_BLK_FEATURES;
	dev_attr.nvqs = VDPASIM_BLK_VQ_NUM;
	dev_attr.config_size = sizeof(struct virtio_blk_config);
	dev_attr.get_config = vdpasim_blk_get_config;
	dev_attr.work_fn = vdpasim_blk_work;
	dev_attr.buffer_size = VDPASIM_BLK_CAPACITY << SECTOR_SHIFT;

	simdev = vdpasim_create(&dev_attr);
	if (IS_ERR(simdev))
		return PTR_ERR(simdev);

	ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_BLK_VQ_NUM);
	if (ret)
		goto put_dev;

	return 0;

put_dev:
	put_device(&simdev->vdpa.dev);
	return ret;
}

static void vdpasim_blk_dev_del(struct vdpa_mgmt_dev *mdev,
				struct vdpa_device *dev)
{
	struct vdpasim *simdev = container_of(dev, struct vdpasim, vdpa);

	_vdpa_unregister_device(&simdev->vdpa);
}

static const struct vdpa_mgmtdev_ops vdpasim_blk_mgmtdev_ops = {
	.dev_add = vdpasim_blk_dev_add,
	.dev_del = vdpasim_blk_dev_del
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct vdpa_mgmt_dev mgmt_dev = {
	.device = &vdpasim_blk_mgmtdev,
	.id_table = id_table,
	.ops = &vdpasim_blk_mgmtdev_ops,
};
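
/*
 * Once the management device is registered, an instance can be created from
 * userspace; for example (hypothetical session, assuming the iproute2 `vdpa`
 * tool is available and "blk0" is an arbitrary name):
 *
 *   # vdpa dev add mgmtdev vdpasim_blk name blk0
 *
 * which ends up in vdpasim_blk_dev_add() above.
 */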

static int __init vdpasim_blk_init(void)
{
	int ret;

	ret = device_register(&vdpasim_blk_mgmtdev);
	if (ret)
		return ret;

	ret = vdpa_mgmtdev_register(&mgmt_dev);
	if (ret)
		goto parent_err;

	return 0;

parent_err:
	device_unregister(&vdpasim_blk_mgmtdev);
	return ret;
}

static void __exit vdpasim_blk_exit(void)
{
	vdpa_mgmtdev_unregister(&mgmt_dev);
	device_unregister(&vdpasim_blk_mgmtdev);
}

module_init(vdpasim_blk_init)
module_exit(vdpasim_blk_exit)

MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);