xref: /linux/drivers/vhost/net.c (revision 4cb584e0ee7df70fd0376aee60cf701855ea8c81)
1 /* Copyright (C) 2009 Red Hat, Inc.
2  * Author: Michael S. Tsirkin <mst@redhat.com>
3  *
4  * This work is licensed under the terms of the GNU GPL, version 2.
5  *
6  * virtio-net server in host kernel.
7  */
8 
9 #include <linux/compat.h>
10 #include <linux/eventfd.h>
11 #include <linux/vhost.h>
12 #include <linux/virtio_net.h>
13 #include <linux/miscdevice.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/mutex.h>
17 #include <linux/workqueue.h>
18 #include <linux/file.h>
19 #include <linux/slab.h>
20 #include <linux/vmalloc.h>
21 
22 #include <linux/net.h>
23 #include <linux/if_packet.h>
24 #include <linux/if_arp.h>
25 #include <linux/if_tun.h>
26 #include <linux/if_macvlan.h>
27 #include <linux/if_vlan.h>
28 
29 #include <net/sock.h>
30 
31 #include "vhost.h"
32 
33 static int experimental_zcopytx = 1;
34 module_param(experimental_zcopytx, int, 0444);
35 MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
36 		                       " 1 - Enable; 0 - Disable");
37 
38 /* Max number of bytes transferred before requeueing the job.
39  * Using this limit prevents one virtqueue from starving others. */
40 #define VHOST_NET_WEIGHT 0x80000
41 
42 /* MAX number of TX used buffers for outstanding zerocopy */
43 #define VHOST_MAX_PEND 128
44 #define VHOST_GOODCOPY_LEN 256
45 
46 /*
47  * For transmit, the used buffer len field is otherwise unused, so we
48  * override it to track buffer status internally. Zerocopy tx only.
49  */
50 /* Lower device DMA failed */
51 #define VHOST_DMA_FAILED_LEN	((__force __virtio32)3)
52 /* Lower device DMA done */
53 #define VHOST_DMA_DONE_LEN	((__force __virtio32)2)
54 /* Lower device DMA in progress */
55 #define VHOST_DMA_IN_PROGRESS	((__force __virtio32)1)
56 /* Buffer unused */
57 #define VHOST_DMA_CLEAR_LEN	((__force __virtio32)0)
58 
59 #define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)
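/*
 * Life cycle of a zerocopy TX entry in vq->heads[i].len: CLEAR (0), then
 * IN_PROGRESS (1) when the buffer is submitted for zerocopy transmission,
 * then DONE (2) or FAILED (3) from vhost_zerocopy_callback(). Note that
 * VHOST_DMA_IS_DONE() treats any value >= DONE as complete, so FAILED
 * entries are also reaped (and counted as errors) by
 * vhost_zerocopy_signal_used().
 */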
60 
61 enum {
62 	VHOST_NET_FEATURES = VHOST_FEATURES |
63 			 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
64 			 (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
65 			 (1ULL << VIRTIO_F_IOMMU_PLATFORM)
66 };
67 
68 enum {
69 	VHOST_NET_VQ_RX = 0,
70 	VHOST_NET_VQ_TX = 1,
71 	VHOST_NET_VQ_MAX = 2,
72 };
73 
74 struct vhost_net_ubuf_ref {
75 	/* refcount follows semantics similar to kref:
76 	 *  0: object is released
77 	 *  1: no outstanding ubufs
78 	 * >1: outstanding ubufs
79 	 */
80 	atomic_t refcount;
81 	wait_queue_head_t wait;
82 	struct vhost_virtqueue *vq;
83 };
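/*
 * Refcount flow: ubufs start at 1 in vhost_net_ubuf_alloc(); handle_tx()
 * takes an extra reference for every zerocopy skb it submits, and
 * vhost_zerocopy_callback() drops it on completion.
 * vhost_net_ubuf_put_and_wait() drops the initial reference and sleeps
 * until the count hits 0, i.e. until all outstanding DMAs have finished.
 */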
84 
85 struct vhost_net_virtqueue {
86 	struct vhost_virtqueue vq;
87 	size_t vhost_hlen;
88 	size_t sock_hlen;
89 	/* vhost zerocopy support fields below: */
90 	/* last used idx for outstanding DMA zerocopy buffers */
91 	int upend_idx;
92 	/* first used idx for DMA done zerocopy buffers */
93 	int done_idx;
94 	/* an array of userspace buffers info */
95 	struct ubuf_info *ubuf_info;
96 	/* Reference counting for outstanding ubufs.
97 	 * Protected by vq mutex. Writers must also take device mutex. */
98 	struct vhost_net_ubuf_ref *ubufs;
99 };
100 
101 struct vhost_net {
102 	struct vhost_dev dev;
103 	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
104 	struct vhost_poll poll[VHOST_NET_VQ_MAX];
105 	/* Number of TX packets recently submitted.
106 	 * Protected by tx vq lock. */
107 	unsigned tx_packets;
108 	/* Number of times zerocopy TX recently failed.
109 	 * Protected by tx vq lock. */
110 	unsigned tx_zcopy_err;
111 	/* Flush in progress. Protected by tx vq lock. */
112 	bool tx_flush;
113 };
114 
115 static unsigned vhost_net_zcopy_mask __read_mostly;
116 
117 static void vhost_net_enable_zcopy(int vq)
118 {
119 	vhost_net_zcopy_mask |= 0x1 << vq;
120 }
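/*
 * One bit per virtqueue index in vhost_net_zcopy_mask. Only the TX bit is
 * ever set (from vhost_net_init(), when experimental_zcopytx is enabled),
 * so zerocopy applies to the TX queue only.
 */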
121 
122 static struct vhost_net_ubuf_ref *
123 vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
124 {
125 	struct vhost_net_ubuf_ref *ubufs;
126 	/* No zero copy backend? Nothing to count. */
127 	if (!zcopy)
128 		return NULL;
129 	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
130 	if (!ubufs)
131 		return ERR_PTR(-ENOMEM);
132 	atomic_set(&ubufs->refcount, 1);
133 	init_waitqueue_head(&ubufs->wait);
134 	ubufs->vq = vq;
135 	return ubufs;
136 }
137 
138 static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
139 {
140 	int r = atomic_sub_return(1, &ubufs->refcount);
141 	if (unlikely(!r))
142 		wake_up(&ubufs->wait);
143 	return r;
144 }
145 
146 static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
147 {
148 	vhost_net_ubuf_put(ubufs);
149 	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
150 }
151 
152 static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
153 {
154 	vhost_net_ubuf_put_and_wait(ubufs);
155 	kfree(ubufs);
156 }
157 
158 static void vhost_net_clear_ubuf_info(struct vhost_net *n)
159 {
160 	int i;
161 
162 	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
163 		kfree(n->vqs[i].ubuf_info);
164 		n->vqs[i].ubuf_info = NULL;
165 	}
166 }
167 
168 static int vhost_net_set_ubuf_info(struct vhost_net *n)
169 {
170 	bool zcopy;
171 	int i;
172 
173 	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
174 		zcopy = vhost_net_zcopy_mask & (0x1 << i);
175 		if (!zcopy)
176 			continue;
177 		n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
178 					      UIO_MAXIOV, GFP_KERNEL);
179 		if (!n->vqs[i].ubuf_info)
180 			goto err;
181 	}
182 	return 0;
183 
184 err:
185 	vhost_net_clear_ubuf_info(n);
186 	return -ENOMEM;
187 }
188 
189 static void vhost_net_vq_reset(struct vhost_net *n)
190 {
191 	int i;
192 
193 	vhost_net_clear_ubuf_info(n);
194 
195 	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
196 		n->vqs[i].done_idx = 0;
197 		n->vqs[i].upend_idx = 0;
198 		n->vqs[i].ubufs = NULL;
199 		n->vqs[i].vhost_hlen = 0;
200 		n->vqs[i].sock_hlen = 0;
201 	}
202 
203 }
204 
205 static void vhost_net_tx_packet(struct vhost_net *net)
206 {
207 	++net->tx_packets;
208 	if (net->tx_packets < 1024)
209 		return;
210 	net->tx_packets = 0;
211 	net->tx_zcopy_err = 0;
212 }
213 
214 static void vhost_net_tx_err(struct vhost_net *net)
215 {
216 	++net->tx_zcopy_err;
217 }
218 
219 static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
220 {
221 	/* TX flush waits for outstanding DMAs to be done.
222 	 * Don't start new DMAs.
223 	 */
224 	return !net->tx_flush &&
225 		net->tx_packets / 64 >= net->tx_zcopy_err;
226 }
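/*
 * The heuristic above disables zerocopy while errors are "recent": both
 * counters are reset every 1024 packets in vhost_net_tx_packet(), and
 * within a window zerocopy stays selected only while roughly at most one
 * in 64 transmitted packets has failed. For example, after 640 packets
 * zerocopy is still used as long as tx_zcopy_err <= 10.
 */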
227 
228 static bool vhost_sock_zcopy(struct socket *sock)
229 {
230 	return unlikely(experimental_zcopytx) &&
231 		sock_flag(sock->sk, SOCK_ZEROCOPY);
232 }
233 
234 /* The lower device may complete DMAs out of order. upend_idx tracks the end
235  * of the outstanding zerocopy range in the used idx space, done_idx tracks
236  * its head. Once the lower device has completed DMAs contiguously starting
237  * at done_idx, we signal those used entries to the guest.
238  */
239 static void vhost_zerocopy_signal_used(struct vhost_net *net,
240 				       struct vhost_virtqueue *vq)
241 {
242 	struct vhost_net_virtqueue *nvq =
243 		container_of(vq, struct vhost_net_virtqueue, vq);
244 	int i, add;
245 	int j = 0;
246 
247 	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
248 		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
249 			vhost_net_tx_err(net);
250 		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
251 			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
252 			++j;
253 		} else
254 			break;
255 	}
256 	while (j) {
257 		add = min(UIO_MAXIOV - nvq->done_idx, j);
258 		vhost_add_used_and_signal_n(vq->dev, vq,
259 					    &vq->heads[nvq->done_idx], add);
260 		nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
261 		j -= add;
262 	}
263 }
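/*
 * Example: with done_idx == 5 and upend_idx == 9, entries 5..8 are
 * outstanding. If the lower device completes 6 and 7 before 5, nothing is
 * signalled yet; once 5 completes, the contiguous run 5..7 is handed to
 * vhost_add_used_and_signal_n() and done_idx advances to 8.
 */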
264 
265 static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
266 {
267 	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
268 	struct vhost_virtqueue *vq = ubufs->vq;
269 	int cnt;
270 
271 	rcu_read_lock_bh();
272 
273 	/* set len to mark this descriptor's buffers as DMA done */
274 	vq->heads[ubuf->desc].len = success ?
275 		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
276 	cnt = vhost_net_ubuf_put(ubufs);
277 
278 	/*
279 	 * Trigger polling thread if guest stopped submitting new buffers:
280 	 * in this case, the refcount after decrement will eventually reach 1.
281 	 * We also trigger polling periodically after each 16 packets
282 	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
283 	 * less than 10% of times).
284 	 */
285 	if (cnt <= 1 || !(cnt % 16))
286 		vhost_poll_queue(&vq->poll);
287 
288 	rcu_read_unlock_bh();
289 }
290 
291 static inline unsigned long busy_clock(void)
292 {
293 	return local_clock() >> 10;
294 }
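/*
 * local_clock() returns nanoseconds; shifting right by 10 divides by 1024,
 * giving roughly microseconds cheaply. vq->busyloop_timeout is added to
 * this value to form the busy-poll deadline, so it is interpreted in the
 * same units.
 */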
295 
296 static bool vhost_can_busy_poll(struct vhost_dev *dev,
297 				unsigned long endtime)
298 {
299 	return likely(!need_resched()) &&
300 	       likely(!time_after(busy_clock(), endtime)) &&
301 	       likely(!signal_pending(current)) &&
302 	       !vhost_has_work(dev);
303 }
304 
305 static void vhost_net_disable_vq(struct vhost_net *n,
306 				 struct vhost_virtqueue *vq)
307 {
308 	struct vhost_net_virtqueue *nvq =
309 		container_of(vq, struct vhost_net_virtqueue, vq);
310 	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
311 	if (!vq->private_data)
312 		return;
313 	vhost_poll_stop(poll);
314 }
315 
316 static int vhost_net_enable_vq(struct vhost_net *n,
317 				struct vhost_virtqueue *vq)
318 {
319 	struct vhost_net_virtqueue *nvq =
320 		container_of(vq, struct vhost_net_virtqueue, vq);
321 	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
322 	struct socket *sock;
323 
324 	sock = vq->private_data;
325 	if (!sock)
326 		return 0;
327 
328 	return vhost_poll_start(poll, sock->file);
329 }
330 
331 static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
332 				    struct vhost_virtqueue *vq,
333 				    struct iovec iov[], unsigned int iov_size,
334 				    unsigned int *out_num, unsigned int *in_num)
335 {
336 	unsigned long uninitialized_var(endtime);
337 	int r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
338 				  out_num, in_num, NULL, NULL);
339 
340 	if (r == vq->num && vq->busyloop_timeout) {
341 		preempt_disable();
342 		endtime = busy_clock() + vq->busyloop_timeout;
343 		while (vhost_can_busy_poll(vq->dev, endtime) &&
344 		       vhost_vq_avail_empty(vq->dev, vq))
345 			cpu_relax();
346 		preempt_enable();
347 		r = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
348 				      out_num, in_num, NULL, NULL);
349 	}
350 
351 	return r;
352 }
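/*
 * TX-side busy polling: if the avail ring looked empty and
 * vq->busyloop_timeout is non-zero, spin (with preemption disabled) until
 * the guest adds buffers or the deadline passes, then retry
 * vhost_get_vq_desc() once. This avoids going back to sleep and waiting
 * for a guest kick on a busy queue.
 */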
353 
354 static bool vhost_exceeds_maxpend(struct vhost_net *net)
355 {
356 	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
357 	struct vhost_virtqueue *vq = &nvq->vq;
358 
359 	return (nvq->upend_idx + vq->num - VHOST_MAX_PEND) % UIO_MAXIOV
360 		== nvq->done_idx;
361 }
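/*
 * Throttle check for zerocopy TX: handle_tx() stops submitting new
 * zerocopy buffers while this returns true and resumes once completions
 * advance done_idx. upend_idx and done_idx index the UIO_MAXIOV-entry
 * heads/ubuf_info ring, hence the modular comparison.
 */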
362 
363 /* Expects to always be run from workqueue - which acts as
364  * read-side critical section for our kind of RCU. */
365 static void handle_tx(struct vhost_net *net)
366 {
367 	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
368 	struct vhost_virtqueue *vq = &nvq->vq;
369 	unsigned out, in;
370 	int head;
371 	struct msghdr msg = {
372 		.msg_name = NULL,
373 		.msg_namelen = 0,
374 		.msg_control = NULL,
375 		.msg_controllen = 0,
376 		.msg_flags = MSG_DONTWAIT,
377 	};
378 	size_t len, total_len = 0;
379 	int err;
380 	size_t hdr_size;
381 	struct socket *sock;
382 	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
383 	bool zcopy, zcopy_used;
384 
385 	mutex_lock(&vq->mutex);
386 	sock = vq->private_data;
387 	if (!sock)
388 		goto out;
389 
390 	if (!vq_iotlb_prefetch(vq))
391 		goto out;
392 
393 	vhost_disable_notify(&net->dev, vq);
394 
395 	hdr_size = nvq->vhost_hlen;
396 	zcopy = nvq->ubufs;
397 
398 	for (;;) {
399 		/* Release buffers whose DMA has completed first */
400 		if (zcopy)
401 			vhost_zerocopy_signal_used(net, vq);
402 
403 		/* If there are too many outstanding DMAs, stop submitting; the
404 		 * zerocopy completion callback will queue the work again.
405 		 * The check below handles upend_idx wraparound. */
406 		if (unlikely(vhost_exceeds_maxpend(net)))
407 			break;
408 
409 		head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
410 						ARRAY_SIZE(vq->iov),
411 						&out, &in);
412 		/* On error, stop handling until the next kick. */
413 		if (unlikely(head < 0))
414 			break;
415 		/* Nothing new?  Wait for eventfd to tell us they refilled. */
416 		if (head == vq->num) {
417 			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
418 				vhost_disable_notify(&net->dev, vq);
419 				continue;
420 			}
421 			break;
422 		}
423 		if (in) {
424 			vq_err(vq, "Unexpected descriptor format for TX: "
425 		       "out %d, in %d\n", out, in);
426 			break;
427 		}
428 		/* Skip header. TODO: support TSO. */
429 		len = iov_length(vq->iov, out);
430 		iov_iter_init(&msg.msg_iter, WRITE, vq->iov, out, len);
431 		iov_iter_advance(&msg.msg_iter, hdr_size);
432 		/* Sanity check */
433 		if (!msg_data_left(&msg)) {
434 			vq_err(vq, "Unexpected header len for TX: "
435 			       "%zd expected %zd\n",
436 			       len, hdr_size);
437 			break;
438 		}
439 		len = msg_data_left(&msg);
440 
441 		zcopy_used = zcopy && len >= VHOST_GOODCOPY_LEN
442 				   && (nvq->upend_idx + 1) % UIO_MAXIOV !=
443 				      nvq->done_idx
444 				   && vhost_net_tx_select_zcopy(net);
445 
446 		/* use msg_control to pass vhost zerocopy ubuf info to skb */
447 		if (zcopy_used) {
448 			struct ubuf_info *ubuf;
449 			ubuf = nvq->ubuf_info + nvq->upend_idx;
450 
451 			vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
452 			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
453 			ubuf->callback = vhost_zerocopy_callback;
454 			ubuf->ctx = nvq->ubufs;
455 			ubuf->desc = nvq->upend_idx;
456 			msg.msg_control = ubuf;
457 			msg.msg_controllen = sizeof(ubuf);
458 			ubufs = nvq->ubufs;
459 			atomic_inc(&ubufs->refcount);
460 			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
461 		} else {
462 			msg.msg_control = NULL;
463 			ubufs = NULL;
464 		}
465 
466 		total_len += len;
467 		if (total_len < VHOST_NET_WEIGHT &&
468 		    !vhost_vq_avail_empty(&net->dev, vq) &&
469 		    likely(!vhost_exceeds_maxpend(net))) {
470 			msg.msg_flags |= MSG_MORE;
471 		} else {
472 			msg.msg_flags &= ~MSG_MORE;
473 		}
474 
475 		/* TODO: Check specific error and bomb out unless ENOBUFS? */
476 		err = sock->ops->sendmsg(sock, &msg, len);
477 		if (unlikely(err < 0)) {
478 			if (zcopy_used) {
479 				vhost_net_ubuf_put(ubufs);
480 				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
481 					% UIO_MAXIOV;
482 			}
483 			vhost_discard_vq_desc(vq, 1);
484 			break;
485 		}
486 		if (err != len)
487 			pr_debug("Truncated TX packet: "
488 				 "len %d != %zd\n", err, len);
489 		if (!zcopy_used)
490 			vhost_add_used_and_signal(&net->dev, vq, head, 0);
491 		else
492 			vhost_zerocopy_signal_used(net, vq);
493 		vhost_net_tx_packet(net);
494 		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
495 			vhost_poll_queue(&vq->poll);
496 			break;
497 		}
498 	}
499 out:
500 	mutex_unlock(&vq->mutex);
501 }
502 
503 static int peek_head_len(struct sock *sk)
504 {
505 	struct socket *sock = sk->sk_socket;
506 	struct sk_buff *head;
507 	int len = 0;
508 	unsigned long flags;
509 
510 	if (sock->ops->peek_len)
511 		return sock->ops->peek_len(sock);
512 
513 	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
514 	head = skb_peek(&sk->sk_receive_queue);
515 	if (likely(head)) {
516 		len = head->len;
517 		if (skb_vlan_tag_present(head))
518 			len += VLAN_HLEN;
519 	}
520 
521 	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
522 	return len;
523 }
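/*
 * The VLAN_HLEN adjustment above accounts for a VLAN tag carried out of
 * band in the skb (skb_vlan_tag_present()): the tag gets reinserted into
 * the packet data when the skb is actually read from the socket, so the
 * peeked length is grown to match what the subsequent read will consume.
 */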
524 
525 static int sk_has_rx_data(struct sock *sk)
526 {
527 	struct socket *sock = sk->sk_socket;
528 
529 	if (sock->ops->peek_len)
530 		return sock->ops->peek_len(sock);
531 
532 	return !skb_queue_empty(&sk->sk_receive_queue);
533 }
534 
535 static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
536 {
537 	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
538 	struct vhost_virtqueue *vq = &nvq->vq;
539 	unsigned long uninitialized_var(endtime);
540 	int len = peek_head_len(sk);
541 
542 	if (!len && vq->busyloop_timeout) {
543 		/* Busy poll both the tx vq and the rx socket here */
544 		mutex_lock(&vq->mutex);
545 		vhost_disable_notify(&net->dev, vq);
546 
547 		preempt_disable();
548 		endtime = busy_clock() + vq->busyloop_timeout;
549 
550 		while (vhost_can_busy_poll(&net->dev, endtime) &&
551 		       !sk_has_rx_data(sk) &&
552 		       vhost_vq_avail_empty(&net->dev, vq))
553 			cpu_relax();
554 
555 		preempt_enable();
556 
557 		if (vhost_enable_notify(&net->dev, vq))
558 			vhost_poll_queue(&vq->poll);
559 		mutex_unlock(&vq->mutex);
560 
561 		len = peek_head_len(sk);
562 	}
563 
564 	return len;
565 }
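/*
 * RX-side busy polling: when the socket has no data yet, spin for up to
 * the TX virtqueue's busyloop_timeout while watching both the RX socket
 * and the TX avail ring. If new TX buffers show up while we wait (detected
 * via vhost_enable_notify()), the TX work is queued so transmission is not
 * starved by RX polling.
 */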
566 
567 /* This is a multi-buffer version of vhost_get_vq_desc(), that works if
568  *	vq has read descriptors only.
569  * @vq		- the relevant virtqueue
570  * @datalen	- data length we'll be reading
571  * @iovcount	- returned count of io vectors we fill
572  * @log		- vhost log
573  * @log_num	- log offset
574  * @quota       - headcount quota, 1 for big buffer
575  *	returns number of buffer heads allocated, negative on error
576  */
577 static int get_rx_bufs(struct vhost_virtqueue *vq,
578 		       struct vring_used_elem *heads,
579 		       int datalen,
580 		       unsigned *iovcount,
581 		       struct vhost_log *log,
582 		       unsigned *log_num,
583 		       unsigned int quota)
584 {
585 	unsigned int out, in;
586 	int seg = 0;
587 	int headcount = 0;
588 	unsigned d;
589 	int r, nlogs = 0;
590 	/* len is always initialized before use since we are always called with
591 	 * datalen > 0.
592 	 */
593 	u32 uninitialized_var(len);
594 
595 	while (datalen > 0 && headcount < quota) {
596 		if (unlikely(seg >= UIO_MAXIOV)) {
597 			r = -ENOBUFS;
598 			goto err;
599 		}
600 		r = vhost_get_vq_desc(vq, vq->iov + seg,
601 				      ARRAY_SIZE(vq->iov) - seg, &out,
602 				      &in, log, log_num);
603 		if (unlikely(r < 0))
604 			goto err;
605 
606 		d = r;
607 		if (d == vq->num) {
608 			r = 0;
609 			goto err;
610 		}
611 		if (unlikely(out || in <= 0)) {
612 			vq_err(vq, "unexpected descriptor format for RX: "
613 				"out %d, in %d\n", out, in);
614 			r = -EINVAL;
615 			goto err;
616 		}
617 		if (unlikely(log)) {
618 			nlogs += *log_num;
619 			log += *log_num;
620 		}
621 		heads[headcount].id = cpu_to_vhost32(vq, d);
622 		len = iov_length(vq->iov + seg, in);
623 		heads[headcount].len = cpu_to_vhost32(vq, len);
624 		datalen -= len;
625 		++headcount;
626 		seg += in;
627 	}
628 	heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
629 	*iovcount = seg;
630 	if (unlikely(log))
631 		*log_num = nlogs;
632 
633 	/* Detect overrun */
634 	if (unlikely(datalen > 0)) {
635 		r = UIO_MAXIOV + 1;
636 		goto err;
637 	}
638 	return headcount;
639 err:
640 	vhost_discard_vq_desc(vq, headcount);
641 	return r;
642 }
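/*
 * Example with mergeable RX buffers: for datalen 3200 and 1500-byte guest
 * buffers, three heads are consumed; the "len + datalen" fixup above trims
 * the last head's reported length from 1500 to 200 (datalen is -1300 by
 * then), so the lengths written to the used ring sum to exactly 3200.
 * Without mergeable buffers the quota is 1, and a packet that does not fit
 * makes this return UIO_MAXIOV + 1, which handle_rx() treats as an overrun
 * and discards with MSG_TRUNC.
 */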
643 
644 /* Expects to always be run from workqueue - which acts as
645  * read-side critical section for our kind of RCU. */
646 static void handle_rx(struct vhost_net *net)
647 {
648 	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
649 	struct vhost_virtqueue *vq = &nvq->vq;
650 	unsigned uninitialized_var(in), log;
651 	struct vhost_log *vq_log;
652 	struct msghdr msg = {
653 		.msg_name = NULL,
654 		.msg_namelen = 0,
655 		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
656 		.msg_controllen = 0,
657 		.msg_flags = MSG_DONTWAIT,
658 	};
659 	struct virtio_net_hdr hdr = {
660 		.flags = 0,
661 		.gso_type = VIRTIO_NET_HDR_GSO_NONE
662 	};
663 	size_t total_len = 0;
664 	int err, mergeable;
665 	s16 headcount;
666 	size_t vhost_hlen, sock_hlen;
667 	size_t vhost_len, sock_len;
668 	struct socket *sock;
669 	struct iov_iter fixup;
670 	__virtio16 num_buffers;
671 
672 	mutex_lock(&vq->mutex);
673 	sock = vq->private_data;
674 	if (!sock)
675 		goto out;
676 
677 	if (!vq_iotlb_prefetch(vq))
678 		goto out;
679 
680 	vhost_disable_notify(&net->dev, vq);
681 	vhost_net_disable_vq(net, vq);
682 
683 	vhost_hlen = nvq->vhost_hlen;
684 	sock_hlen = nvq->sock_hlen;
685 
686 	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
687 		vq->log : NULL;
688 	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
689 
690 	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
691 		sock_len += sock_hlen;
692 		vhost_len = sock_len + vhost_hlen;
693 		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
694 					&in, vq_log, &log,
695 					likely(mergeable) ? UIO_MAXIOV : 1);
696 		/* On error, stop handling until the next kick. */
697 		if (unlikely(headcount < 0))
698 			goto out;
699 		/* On overrun, truncate and discard */
700 		if (unlikely(headcount > UIO_MAXIOV)) {
701 			iov_iter_init(&msg.msg_iter, READ, vq->iov, 1, 1);
702 			err = sock->ops->recvmsg(sock, &msg,
703 						 1, MSG_DONTWAIT | MSG_TRUNC);
704 			pr_debug("Discarded rx packet: len %zd\n", sock_len);
705 			continue;
706 		}
707 		/* OK, now we need to know about added descriptors. */
708 		if (!headcount) {
709 			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
710 				/* They have slipped one in as we were
711 				 * doing that: check again. */
712 				vhost_disable_notify(&net->dev, vq);
713 				continue;
714 			}
715 			/* Nothing new?  Wait for eventfd to tell us
716 			 * they refilled. */
717 			goto out;
718 		}
719 		/* We don't need to be notified again. */
720 		iov_iter_init(&msg.msg_iter, READ, vq->iov, in, vhost_len);
721 		fixup = msg.msg_iter;
722 		if (unlikely(vhost_hlen)) {
723 			/* We will supply the header ourselves
724 			 * TODO: support TSO.
725 			 */
726 			iov_iter_advance(&msg.msg_iter, vhost_hlen);
727 		}
728 		err = sock->ops->recvmsg(sock, &msg,
729 					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
730 		/* Userspace might have consumed the packet meanwhile:
731 		 * it's not supposed to do this usually, but might be hard
732 		 * to prevent. Discard data we got (if any) and keep going. */
733 		if (unlikely(err != sock_len)) {
734 			pr_debug("Discarded rx packet: "
735 				 "len %d, expected %zd\n", err, sock_len);
736 			vhost_discard_vq_desc(vq, headcount);
737 			continue;
738 		}
739 		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
740 		if (unlikely(vhost_hlen)) {
741 			if (copy_to_iter(&hdr, sizeof(hdr),
742 					 &fixup) != sizeof(hdr)) {
743 				vq_err(vq, "Unable to write vnet_hdr "
744 				       "at addr %p\n", vq->iov->iov_base);
745 				goto out;
746 			}
747 		} else {
748 			/* Header came from socket; we'll need to patch
749 			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
750 			 */
751 			iov_iter_advance(&fixup, sizeof(hdr));
752 		}
753 		/* TODO: Should check and handle checksum. */
754 
755 		num_buffers = cpu_to_vhost16(vq, headcount);
756 		if (likely(mergeable) &&
757 		    copy_to_iter(&num_buffers, sizeof num_buffers,
758 				 &fixup) != sizeof num_buffers) {
759 			vq_err(vq, "Failed num_buffers write");
760 			vhost_discard_vq_desc(vq, headcount);
761 			goto out;
762 		}
763 		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
764 					    headcount);
765 		if (unlikely(vq_log))
766 			vhost_log_write(vq, vq_log, log, vhost_len);
767 		total_len += vhost_len;
768 		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
769 			vhost_poll_queue(&vq->poll);
770 			goto out;
771 		}
772 	}
773 	vhost_net_enable_vq(net, vq);
774 out:
775 	mutex_unlock(&vq->mutex);
776 }
777 
778 static void handle_tx_kick(struct vhost_work *work)
779 {
780 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
781 						  poll.work);
782 	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
783 
784 	handle_tx(net);
785 }
786 
787 static void handle_rx_kick(struct vhost_work *work)
788 {
789 	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
790 						  poll.work);
791 	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);
792 
793 	handle_rx(net);
794 }
795 
796 static void handle_tx_net(struct vhost_work *work)
797 {
798 	struct vhost_net *net = container_of(work, struct vhost_net,
799 					     poll[VHOST_NET_VQ_TX].work);
800 	handle_tx(net);
801 }
802 
803 static void handle_rx_net(struct vhost_work *work)
804 {
805 	struct vhost_net *net = container_of(work, struct vhost_net,
806 					     poll[VHOST_NET_VQ_RX].work);
807 	handle_rx(net);
808 }
809 
810 static int vhost_net_open(struct inode *inode, struct file *f)
811 {
812 	struct vhost_net *n;
813 	struct vhost_dev *dev;
814 	struct vhost_virtqueue **vqs;
815 	int i;
816 
817 	n = kmalloc(sizeof *n, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
818 	if (!n) {
819 		n = vmalloc(sizeof *n);
820 		if (!n)
821 			return -ENOMEM;
822 	}
823 	vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
824 	if (!vqs) {
825 		kvfree(n);
826 		return -ENOMEM;
827 	}
828 
829 	dev = &n->dev;
830 	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
831 	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
832 	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
833 	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
834 	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
835 		n->vqs[i].ubufs = NULL;
836 		n->vqs[i].ubuf_info = NULL;
837 		n->vqs[i].upend_idx = 0;
838 		n->vqs[i].done_idx = 0;
839 		n->vqs[i].vhost_hlen = 0;
840 		n->vqs[i].sock_hlen = 0;
841 	}
842 	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
843 
844 	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
845 	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
846 
847 	f->private_data = n;
848 
849 	return 0;
850 }
851 
852 static struct socket *vhost_net_stop_vq(struct vhost_net *n,
853 					struct vhost_virtqueue *vq)
854 {
855 	struct socket *sock;
856 
857 	mutex_lock(&vq->mutex);
858 	sock = vq->private_data;
859 	vhost_net_disable_vq(n, vq);
860 	vq->private_data = NULL;
861 	mutex_unlock(&vq->mutex);
862 	return sock;
863 }
864 
865 static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
866 			   struct socket **rx_sock)
867 {
868 	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
869 	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
870 }
871 
872 static void vhost_net_flush_vq(struct vhost_net *n, int index)
873 {
874 	vhost_poll_flush(n->poll + index);
875 	vhost_poll_flush(&n->vqs[index].vq.poll);
876 }
877 
878 static void vhost_net_flush(struct vhost_net *n)
879 {
880 	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
881 	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
882 	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
883 		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
884 		n->tx_flush = true;
885 		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
886 		/* Wait for all outstanding lower-device DMAs to finish. */
887 		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
888 		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
889 		n->tx_flush = false;
890 		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
891 		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
892 	}
893 }
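/*
 * TX flush protocol: tx_flush is set under the TX vq mutex so that
 * handle_tx() (via vhost_net_tx_select_zcopy()) stops starting new
 * zerocopy DMAs, then vhost_net_ubuf_put_and_wait() drops the base
 * reference and sleeps until every outstanding DMA has completed.
 * Afterwards the refcount is re-armed to 1 so the ubufs can be reused.
 */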
894 
895 static int vhost_net_release(struct inode *inode, struct file *f)
896 {
897 	struct vhost_net *n = f->private_data;
898 	struct socket *tx_sock;
899 	struct socket *rx_sock;
900 
901 	vhost_net_stop(n, &tx_sock, &rx_sock);
902 	vhost_net_flush(n);
903 	vhost_dev_stop(&n->dev);
904 	vhost_dev_cleanup(&n->dev, false);
905 	vhost_net_vq_reset(n);
906 	if (tx_sock)
907 		sockfd_put(tx_sock);
908 	if (rx_sock)
909 		sockfd_put(rx_sock);
910 	/* Make sure no callbacks are outstanding */
911 	synchronize_rcu_bh();
912 	/* We do an extra flush before freeing memory,
913 	 * since jobs can re-queue themselves. */
914 	vhost_net_flush(n);
915 	kfree(n->dev.vqs);
916 	kvfree(n);
917 	return 0;
918 }
919 
920 static struct socket *get_raw_socket(int fd)
921 {
922 	struct {
923 		struct sockaddr_ll sa;
924 		char  buf[MAX_ADDR_LEN];
925 	} uaddr;
926 	int uaddr_len = sizeof uaddr, r;
927 	struct socket *sock = sockfd_lookup(fd, &r);
928 
929 	if (!sock)
930 		return ERR_PTR(-ENOTSOCK);
931 
932 	/* Parameter checking */
933 	if (sock->sk->sk_type != SOCK_RAW) {
934 		r = -ESOCKTNOSUPPORT;
935 		goto err;
936 	}
937 
938 	r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
939 			       &uaddr_len, 0);
940 	if (r)
941 		goto err;
942 
943 	if (uaddr.sa.sll_family != AF_PACKET) {
944 		r = -EPFNOSUPPORT;
945 		goto err;
946 	}
947 	return sock;
948 err:
949 	sockfd_put(sock);
950 	return ERR_PTR(r);
951 }
952 
953 static struct socket *get_tap_socket(int fd)
954 {
955 	struct file *file = fget(fd);
956 	struct socket *sock;
957 
958 	if (!file)
959 		return ERR_PTR(-EBADF);
960 	sock = tun_get_socket(file);
961 	if (!IS_ERR(sock))
962 		return sock;
963 	sock = macvtap_get_socket(file);
964 	if (IS_ERR(sock))
965 		fput(file);
966 	return sock;
967 }
968 
969 static struct socket *get_socket(int fd)
970 {
971 	struct socket *sock;
972 
973 	/* special case to disable backend */
974 	if (fd == -1)
975 		return NULL;
976 	sock = get_raw_socket(fd);
977 	if (!IS_ERR(sock))
978 		return sock;
979 	sock = get_tap_socket(fd);
980 	if (!IS_ERR(sock))
981 		return sock;
982 	return ERR_PTR(-ENOTSOCK);
983 }
984 
985 static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
986 {
987 	struct socket *sock, *oldsock;
988 	struct vhost_virtqueue *vq;
989 	struct vhost_net_virtqueue *nvq;
990 	struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
991 	int r;
992 
993 	mutex_lock(&n->dev.mutex);
994 	r = vhost_dev_check_owner(&n->dev);
995 	if (r)
996 		goto err;
997 
998 	if (index >= VHOST_NET_VQ_MAX) {
999 		r = -ENOBUFS;
1000 		goto err;
1001 	}
1002 	vq = &n->vqs[index].vq;
1003 	nvq = &n->vqs[index];
1004 	mutex_lock(&vq->mutex);
1005 
1006 	/* Verify that ring has been setup correctly. */
1007 	if (!vhost_vq_access_ok(vq)) {
1008 		r = -EFAULT;
1009 		goto err_vq;
1010 	}
1011 	sock = get_socket(fd);
1012 	if (IS_ERR(sock)) {
1013 		r = PTR_ERR(sock);
1014 		goto err_vq;
1015 	}
1016 
1017 	/* start polling new socket */
1018 	oldsock = vq->private_data;
1019 	if (sock != oldsock) {
1020 		ubufs = vhost_net_ubuf_alloc(vq,
1021 					     sock && vhost_sock_zcopy(sock));
1022 		if (IS_ERR(ubufs)) {
1023 			r = PTR_ERR(ubufs);
1024 			goto err_ubufs;
1025 		}
1026 
1027 		vhost_net_disable_vq(n, vq);
1028 		vq->private_data = sock;
1029 		r = vhost_vq_init_access(vq);
1030 		if (r)
1031 			goto err_used;
1032 		r = vhost_net_enable_vq(n, vq);
1033 		if (r)
1034 			goto err_used;
1035 
1036 		oldubufs = nvq->ubufs;
1037 		nvq->ubufs = ubufs;
1038 
1039 		n->tx_packets = 0;
1040 		n->tx_zcopy_err = 0;
1041 		n->tx_flush = false;
1042 	}
1043 
1044 	mutex_unlock(&vq->mutex);
1045 
1046 	if (oldubufs) {
1047 		vhost_net_ubuf_put_wait_and_free(oldubufs);
1048 		mutex_lock(&vq->mutex);
1049 		vhost_zerocopy_signal_used(n, vq);
1050 		mutex_unlock(&vq->mutex);
1051 	}
1052 
1053 	if (oldsock) {
1054 		vhost_net_flush_vq(n, index);
1055 		sockfd_put(oldsock);
1056 	}
1057 
1058 	mutex_unlock(&n->dev.mutex);
1059 	return 0;
1060 
1061 err_used:
1062 	vq->private_data = oldsock;
1063 	vhost_net_enable_vq(n, vq);
1064 	if (ubufs)
1065 		vhost_net_ubuf_put_wait_and_free(ubufs);
1066 err_ubufs:
1067 	sockfd_put(sock);
1068 err_vq:
1069 	mutex_unlock(&vq->mutex);
1070 err:
1071 	mutex_unlock(&n->dev.mutex);
1072 	return r;
1073 }
1074 
1075 static long vhost_net_reset_owner(struct vhost_net *n)
1076 {
1077 	struct socket *tx_sock = NULL;
1078 	struct socket *rx_sock = NULL;
1079 	long err;
1080 	struct vhost_umem *umem;
1081 
1082 	mutex_lock(&n->dev.mutex);
1083 	err = vhost_dev_check_owner(&n->dev);
1084 	if (err)
1085 		goto done;
1086 	umem = vhost_dev_reset_owner_prepare();
1087 	if (!umem) {
1088 		err = -ENOMEM;
1089 		goto done;
1090 	}
1091 	vhost_net_stop(n, &tx_sock, &rx_sock);
1092 	vhost_net_flush(n);
1093 	vhost_dev_reset_owner(&n->dev, umem);
1094 	vhost_net_vq_reset(n);
1095 done:
1096 	mutex_unlock(&n->dev.mutex);
1097 	if (tx_sock)
1098 		sockfd_put(tx_sock);
1099 	if (rx_sock)
1100 		sockfd_put(rx_sock);
1101 	return err;
1102 }
1103 
1104 static int vhost_net_set_features(struct vhost_net *n, u64 features)
1105 {
1106 	size_t vhost_hlen, sock_hlen, hdr_len;
1107 	int i;
1108 
1109 	hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
1110 			       (1ULL << VIRTIO_F_VERSION_1))) ?
1111 			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
1112 			sizeof(struct virtio_net_hdr);
1113 	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
1114 		/* vhost provides vnet_hdr */
1115 		vhost_hlen = hdr_len;
1116 		sock_hlen = 0;
1117 	} else {
1118 		/* socket provides vnet_hdr */
1119 		vhost_hlen = 0;
1120 		sock_hlen = hdr_len;
1121 	}
1122 	mutex_lock(&n->dev.mutex);
1123 	if ((features & (1 << VHOST_F_LOG_ALL)) &&
1124 	    !vhost_log_access_ok(&n->dev))
1125 		goto out_unlock;
1126 
1127 	if ((features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))) {
1128 		if (vhost_init_device_iotlb(&n->dev, true))
1129 			goto out_unlock;
1130 	}
1131 
1132 	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
1133 		mutex_lock(&n->vqs[i].vq.mutex);
1134 		n->vqs[i].vq.acked_features = features;
1135 		n->vqs[i].vhost_hlen = vhost_hlen;
1136 		n->vqs[i].sock_hlen = sock_hlen;
1137 		mutex_unlock(&n->vqs[i].vq.mutex);
1138 	}
1139 	mutex_unlock(&n->dev.mutex);
1140 	return 0;
1141 
1142 out_unlock:
1143 	mutex_unlock(&n->dev.mutex);
1144 	return -EFAULT;
1145 }
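/*
 * Example: with VIRTIO_NET_F_MRG_RXBUF or VIRTIO_F_VERSION_1 negotiated,
 * hdr_len is sizeof(struct virtio_net_hdr_mrg_rxbuf) (12 bytes), otherwise
 * sizeof(struct virtio_net_hdr) (10 bytes). If VHOST_NET_F_VIRTIO_NET_HDR
 * is also set, vhost itself supplies/strips that header (vhost_hlen) and
 * the backend socket is assumed to carry none (sock_hlen = 0); otherwise
 * the backend socket is expected to produce/consume it and sock_hlen takes
 * the full size.
 */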
1146 
1147 static long vhost_net_set_owner(struct vhost_net *n)
1148 {
1149 	int r;
1150 
1151 	mutex_lock(&n->dev.mutex);
1152 	if (vhost_dev_has_owner(&n->dev)) {
1153 		r = -EBUSY;
1154 		goto out;
1155 	}
1156 	r = vhost_net_set_ubuf_info(n);
1157 	if (r)
1158 		goto out;
1159 	r = vhost_dev_set_owner(&n->dev);
1160 	if (r)
1161 		vhost_net_clear_ubuf_info(n);
1162 	vhost_net_flush(n);
1163 out:
1164 	mutex_unlock(&n->dev.mutex);
1165 	return r;
1166 }
1167 
1168 static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
1169 			    unsigned long arg)
1170 {
1171 	struct vhost_net *n = f->private_data;
1172 	void __user *argp = (void __user *)arg;
1173 	u64 __user *featurep = argp;
1174 	struct vhost_vring_file backend;
1175 	u64 features;
1176 	int r;
1177 
1178 	switch (ioctl) {
1179 	case VHOST_NET_SET_BACKEND:
1180 		if (copy_from_user(&backend, argp, sizeof backend))
1181 			return -EFAULT;
1182 		return vhost_net_set_backend(n, backend.index, backend.fd);
1183 	case VHOST_GET_FEATURES:
1184 		features = VHOST_NET_FEATURES;
1185 		if (copy_to_user(featurep, &features, sizeof features))
1186 			return -EFAULT;
1187 		return 0;
1188 	case VHOST_SET_FEATURES:
1189 		if (copy_from_user(&features, featurep, sizeof features))
1190 			return -EFAULT;
1191 		if (features & ~VHOST_NET_FEATURES)
1192 			return -EOPNOTSUPP;
1193 		return vhost_net_set_features(n, features);
1194 	case VHOST_RESET_OWNER:
1195 		return vhost_net_reset_owner(n);
1196 	case VHOST_SET_OWNER:
1197 		return vhost_net_set_owner(n);
1198 	default:
1199 		mutex_lock(&n->dev.mutex);
1200 		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
1201 		if (r == -ENOIOCTLCMD)
1202 			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
1203 		else
1204 			vhost_net_flush(n);
1205 		mutex_unlock(&n->dev.mutex);
1206 		return r;
1207 	}
1208 }
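/*
 * Rough userspace usage sketch (illustrative, not part of this driver): a
 * VMM opens /dev/vhost-net and drives it with the ioctls handled above,
 * for example:
 *
 *	int vhost = open("/dev/vhost-net", O_RDWR);
 *	ioctl(vhost, VHOST_SET_OWNER, 0);
 *	ioctl(vhost, VHOST_SET_FEATURES, &features);
 *	... per-ring VHOST_SET_VRING_NUM/ADDR/BASE/KICK/CALL setup ...
 *	struct vhost_vring_file backend = { .index = 1, .fd = tap_fd };
 *	ioctl(vhost, VHOST_NET_SET_BACKEND, &backend);
 *
 * One VHOST_NET_SET_BACKEND call is made per virtqueue index (0 is RX,
 * 1 is TX, matching VHOST_NET_VQ_RX/TX above); tap_fd here stands for a
 * tun/macvtap or raw packet socket fd obtained elsewhere, and passing
 * fd == -1 detaches the backend again (see get_socket()).
 */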
1209 
1210 #ifdef CONFIG_COMPAT
1211 static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
1212 				   unsigned long arg)
1213 {
1214 	return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
1215 }
1216 #endif
1217 
1218 static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
1219 {
1220 	struct file *file = iocb->ki_filp;
1221 	struct vhost_net *n = file->private_data;
1222 	struct vhost_dev *dev = &n->dev;
1223 	int noblock = file->f_flags & O_NONBLOCK;
1224 
1225 	return vhost_chr_read_iter(dev, to, noblock);
1226 }
1227 
1228 static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
1229 					struct iov_iter *from)
1230 {
1231 	struct file *file = iocb->ki_filp;
1232 	struct vhost_net *n = file->private_data;
1233 	struct vhost_dev *dev = &n->dev;
1234 
1235 	return vhost_chr_write_iter(dev, from);
1236 }
1237 
1238 static unsigned int vhost_net_chr_poll(struct file *file, poll_table *wait)
1239 {
1240 	struct vhost_net *n = file->private_data;
1241 	struct vhost_dev *dev = &n->dev;
1242 
1243 	return vhost_chr_poll(file, dev, wait);
1244 }
1245 
1246 static const struct file_operations vhost_net_fops = {
1247 	.owner          = THIS_MODULE,
1248 	.release        = vhost_net_release,
1249 	.read_iter      = vhost_net_chr_read_iter,
1250 	.write_iter     = vhost_net_chr_write_iter,
1251 	.poll           = vhost_net_chr_poll,
1252 	.unlocked_ioctl = vhost_net_ioctl,
1253 #ifdef CONFIG_COMPAT
1254 	.compat_ioctl   = vhost_net_compat_ioctl,
1255 #endif
1256 	.open           = vhost_net_open,
1257 	.llseek		= noop_llseek,
1258 };
1259 
1260 static struct miscdevice vhost_net_misc = {
1261 	.minor = VHOST_NET_MINOR,
1262 	.name = "vhost-net",
1263 	.fops = &vhost_net_fops,
1264 };
1265 
1266 static int vhost_net_init(void)
1267 {
1268 	if (experimental_zcopytx)
1269 		vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
1270 	return misc_register(&vhost_net_misc);
1271 }
1272 module_init(vhost_net_init);
1273 
1274 static void vhost_net_exit(void)
1275 {
1276 	misc_deregister(&vhost_net_misc);
1277 }
1278 module_exit(vhost_net_exit);
1279 
1280 MODULE_VERSION("0.0.1");
1281 MODULE_LICENSE("GPL v2");
1282 MODULE_AUTHOR("Michael S. Tsirkin");
1283 MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
1284 MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
1285 MODULE_ALIAS("devname:vhost-net");
1286