xref: /linux/drivers/vhost/test.c (revision 1c4b5ecb7ea190fa3e9f9d6891e6c90b60e04f24)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * test virtio server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>

#include "test.h"
#include "vhost.h"

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_TEST_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with
 * pkts.
 */
#define VHOST_TEST_PKT_WEIGHT 256

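/*
 * Illustrative note (this matches the vhost core behaviour, but is not part
 * of the original file): when vhost_exceeds_weight() trips in handle_vq()
 * below, the core re-queues the work via vhost_poll_queue() and returns
 * true, so the handler bails out and other virtqueues get a chance to run.
 */
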
enum {
	VHOST_TEST_VQ = 0,
	VHOST_TEST_VQ_MAX = 1,
};

struct vhost_test {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_TEST_VQ_MAX];
};

/* Expected to always be run from the workqueue, which acts as
 * a read-side critical section for our kind of RCU. */
static void handle_vq(struct vhost_test *n)
{
	struct vhost_virtqueue *vq = &n->vqs[VHOST_TEST_VQ];
	unsigned out, in;
	int head;
	size_t len, total_len = 0;
	void *private;

	mutex_lock(&vq->mutex);
	private = vhost_vq_get_backend(vq);
	if (!private) {
		mutex_unlock(&vq->mutex);
		return;
	}

	vhost_disable_notify(&n->dev, vq);

	for (;;) {
		head = vhost_get_vq_desc(vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&n->dev, vq))) {
				vhost_disable_notify(&n->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: out %d, in %d\n",
			       out, in);
			break;
		}
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected 0 len for TX\n");
			break;
		}
		vhost_add_used_and_signal(&n->dev, vq, head, 0);
		total_len += len;
		if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
			break;
	}

	mutex_unlock(&vq->mutex);
}

static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_test *n = container_of(vq->dev, struct vhost_test, dev);

	handle_vq(n);
}

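/*
 * Usage sketch (assumes the standard vhost kick path; not part of the
 * original file): userspace registers an eventfd with VHOST_SET_VRING_KICK.
 * A write to that eventfd queues this work on the device's vhost worker,
 * which then calls handle_vq(). kick_fd below is a hypothetical name:
 *
 *	uint64_t one = 1;
 *	write(kick_fd, &one, sizeof(one));	// schedules handle_vq_kick
 */
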
static int vhost_test_open(struct inode *inode, struct file *f)
{
	struct vhost_test *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;

	if (!n)
		return -ENOMEM;
	vqs = kmalloc_array(VHOST_TEST_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kfree(n);
		return -ENOMEM;
	}

	dev = &n->dev;
	vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
	n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
	vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
		       VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT, true, NULL);

	f->private_data = n;

	return 0;
}

static void *vhost_test_stop_vq(struct vhost_test *n,
				struct vhost_virtqueue *vq)
{
	void *private;

	mutex_lock(&vq->mutex);
	private = vhost_vq_get_backend(vq);
	vhost_vq_set_backend(vq, NULL);
	mutex_unlock(&vq->mutex);
	return private;
}

static void vhost_test_stop(struct vhost_test *n, void **privatep)
{
	*privatep = vhost_test_stop_vq(n, n->vqs + VHOST_TEST_VQ);
}

static void vhost_test_flush_vq(struct vhost_test *n, int index)
{
	vhost_poll_flush(&n->vqs[index].poll);
}

static void vhost_test_flush(struct vhost_test *n)
{
	vhost_test_flush_vq(n, VHOST_TEST_VQ);
}

static int vhost_test_release(struct inode *inode, struct file *f)
{
	struct vhost_test *n = f->private_data;
	void *private;

	vhost_test_stop(n, &private);
	vhost_test_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_test_flush(n);
	kfree(n->dev.vqs);
	kfree(n);
	return 0;
}

static long vhost_test_run(struct vhost_test *n, int test)
{
	void *priv, *oldpriv;
	struct vhost_virtqueue *vq;
	int r, index;

	if (test < 0 || test > 1)
		return -EINVAL;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	for (index = 0; index < n->dev.nvqs; ++index) {
		/* Verify that ring has been set up correctly. */
		if (!vhost_vq_access_ok(&n->vqs[index])) {
			r = -EFAULT;
			goto err;
		}
	}

	for (index = 0; index < n->dev.nvqs; ++index) {
		vq = n->vqs + index;
		mutex_lock(&vq->mutex);
		priv = test ? n : NULL;

		/* start using the new backend */
		oldpriv = vhost_vq_get_backend(vq);
		vhost_vq_set_backend(vq, priv);

		r = vhost_vq_init_access(&n->vqs[index]);

		mutex_unlock(&vq->mutex);

		if (r)
			goto err;

		if (oldpriv)
			vhost_test_flush_vq(n, index);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_test_reset_owner(struct vhost_test *n)
{
	void *priv = NULL;
	long err;
	struct vhost_iotlb *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	umem = vhost_dev_reset_owner_prepare();
	if (!umem) {
		err = -ENOMEM;
		goto done;
	}
	vhost_test_stop(n, &priv);
	vhost_test_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_reset_owner(&n->dev, umem);
done:
	mutex_unlock(&n->dev.mutex);
	return err;
}

static int vhost_test_set_features(struct vhost_test *n, u64 features)
{
	struct vhost_virtqueue *vq;

	mutex_lock(&n->dev.mutex);
	if ((features & (1ULL << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	vq = &n->vqs[VHOST_TEST_VQ];
	mutex_lock(&vq->mutex);
	vq->acked_features = features;
	mutex_unlock(&vq->mutex);
	mutex_unlock(&n->dev.mutex);
	return 0;
}

static long vhost_test_set_backend(struct vhost_test *n, unsigned index, int fd)
{
	/* Saved across a disable/enable cycle: disabling stashes the current
	 * backend pointer here so a later enable can restore it. */
	static void *backend;

	const bool enable = fd != -1;
	struct vhost_virtqueue *vq;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_TEST_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = &n->vqs[index];
	mutex_lock(&vq->mutex);

	/* Verify that ring has been set up correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	if (!enable) {
		vhost_poll_stop(&vq->poll);
		backend = vhost_vq_get_backend(vq);
		vhost_vq_set_backend(vq, NULL);
	} else {
		vhost_vq_set_backend(vq, backend);
		r = vhost_vq_init_access(vq);
		if (r == 0)
			r = vhost_poll_start(&vq->poll, vq->kick);
	}

	mutex_unlock(&vq->mutex);

	if (enable)
		vhost_test_flush_vq(n, index);

	mutex_unlock(&n->dev.mutex);
	return 0;

err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

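/*
 * Usage sketch (illustrative, not part of the driver): userspace toggles
 * the test backend with VHOST_TEST_SET_BACKEND. Any fd other than -1 means
 * "enable"; the descriptor itself is not otherwise used here.
 *
 *	struct vhost_vring_file backend = { .index = 0, .fd = 1 };
 *
 *	ioctl(vhost_fd, VHOST_TEST_SET_BACKEND, &backend);	// enable
 *	backend.fd = -1;
 *	ioctl(vhost_fd, VHOST_TEST_SET_BACKEND, &backend);	// disable
 */
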
static long vhost_test_ioctl(struct file *f, unsigned int ioctl,
			     unsigned long arg)
{
	struct vhost_vring_file backend;
	struct vhost_test *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	int test;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_TEST_RUN:
		if (copy_from_user(&test, argp, sizeof test))
			return -EFAULT;
		return vhost_test_run(n, test);
	case VHOST_TEST_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_test_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_FEATURES)
			return -EOPNOTSUPP;
		return vhost_test_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_test_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		vhost_test_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

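/*
 * End-to-end usage sketch (illustrative; the canonical userspace
 * counterpart is tools/virtio/virtio_test.c). Assuming the memory table,
 * vring addresses and kick/call eventfds have been configured with the
 * generic vhost ioctls, a run is started and stopped with VHOST_TEST_RUN:
 *
 *	int fd = open("/dev/vhost-test", O_RDWR);
 *	int test = 1;
 *
 *	ioctl(fd, VHOST_SET_OWNER, NULL);
 *	// ... VHOST_SET_MEM_TABLE, VHOST_SET_VRING_*, etc. ...
 *	ioctl(fd, VHOST_TEST_RUN, &test);	// start
 *	test = 0;
 *	ioctl(fd, VHOST_TEST_RUN, &test);	// stop
 */
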
static const struct file_operations vhost_test_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_test_release,
	.unlocked_ioctl = vhost_test_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.open           = vhost_test_open,
	.llseek         = noop_llseek,
};

static struct miscdevice vhost_test_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "vhost-test",
	.fops  = &vhost_test_fops,
};
module_misc_device(vhost_test_misc);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel side for virtio simulator");