/* xref: /linux/drivers/vhost/net.c (revision 9538aa46c2427d6782aa10036c4da4c541605e0e) */
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>

#include <net/sock.h>

#include "vhost.h"

static int experimental_zcopytx = 1;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
		                       " 1 - Enable; 0 - Disable");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit (0x80000 bytes = 512 KiB) prevents one virtqueue
 * from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* Max number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256

/*
 * For transmit, the used buffer len is not needed by the guest, so we
 * reuse it to track buffer status internally; zerocopy TX only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN	3
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN	2
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS	1
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN	0

#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN)

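/*
 * Editor's illustrative sketch (not part of the driver, compiled out):
 * the used "len" field acts as a tiny per-buffer state machine for
 * zerocopy TX. Both DONE (2) and FAILED (3) compare >= VHOST_DMA_DONE_LEN,
 * so a failed DMA is still reclaimed by vhost_zerocopy_signal_used();
 * only CLEAR (0) and IN_PROGRESS (1) keep a slot pinned.
 */
#if 0
static bool example_dma_state_done(u32 len)
{
	/* CLEAR=0 -> false, IN_PROGRESS=1 -> false,
	 * DONE=2 -> true, FAILED=3 -> true */
	return VHOST_DMA_IS_DONE(len);
}
#endif
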
enum {
	VHOST_NET_FEATURES = VHOST_FEATURES |
			 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
			 (1ULL << VIRTIO_NET_F_MRG_RXBUF),
};

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

struct vhost_net_ubuf_ref {
	struct kref kref;
	wait_queue_head_t wait;
	struct vhost_virtqueue *vq;
};

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;
	/* hdr is used to store the virtio header.
	 * Since each iovec has >= 1 byte length, we never need more than
	 * header length entries to store the header. */
	struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
	size_t vhost_hlen;
	size_t sock_hlen;
	/* vhost zerocopy support fields below: */
	/* last used idx for outstanding DMA zerocopy buffers */
	int upend_idx;
	/* first used idx for DMA done zerocopy buffers */
	int done_idx;
	/* an array of userspace buffers info */
	struct ubuf_info *ubuf_info;
	/* Reference counting for outstanding ubufs.
	 * Protected by vq mutex. Writers must also take device mutex. */
	struct vhost_net_ubuf_ref *ubufs;
};

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Number of TX packets recently submitted.
	 * Protected by tx vq lock. */
	unsigned tx_packets;
	/* Number of times zerocopy TX recently failed.
	 * Protected by tx vq lock. */
	unsigned tx_zcopy_err;
	/* Flush in progress. Protected by tx vq lock. */
	bool tx_flush;
};

static unsigned vhost_net_zcopy_mask __read_mostly;

static void vhost_net_enable_zcopy(int vq)
{
	vhost_net_zcopy_mask |= 0x1 << vq;
}

static void vhost_net_zerocopy_done_signal(struct kref *kref)
{
	struct vhost_net_ubuf_ref *ubufs;

	ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
	wake_up(&ubufs->wait);
}

static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
	struct vhost_net_ubuf_ref *ubufs;
	/* No zero copy backend? Nothing to count. */
	if (!zcopy)
		return NULL;
	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
	if (!ubufs)
		return ERR_PTR(-ENOMEM);
	kref_init(&ubufs->kref);
	init_waitqueue_head(&ubufs->wait);
	ubufs->vq = vq;
	return ubufs;
}

static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
	kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
}

static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
	kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
	wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
}

static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
{
	vhost_net_ubuf_put_and_wait(ubufs);
	kfree(ubufs);
}

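/*
 * Editor's note (illustrative, compiled out): the ubufs kref starts at 1
 * from kref_init() in vhost_net_ubuf_alloc(). handle_tx() takes one extra
 * reference per in-flight zerocopy packet and vhost_zerocopy_callback()
 * drops it on DMA completion, so the count is 1 + outstanding DMAs.
 * vhost_net_ubuf_put_and_wait() drops the initial reference and then
 * waits for the count to reach zero, i.e. for all DMAs to finish.
 */
#if 0
static void example_ubuf_lifecycle(struct vhost_net_ubuf_ref *ubufs)
{
	kref_get(&ubufs->kref);			/* one zerocopy packet submitted */
	vhost_net_ubuf_put(ubufs);		/* its DMA completed */
	vhost_net_ubuf_put_and_wait(ubufs);	/* flush: wait for refcount 0 */
}
#endif
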
static void vhost_net_clear_ubuf_info(struct vhost_net *n)
{
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		kfree(n->vqs[i].ubuf_info);
		n->vqs[i].ubuf_info = NULL;
	}
}

static int vhost_net_set_ubuf_info(struct vhost_net *n)
{
	bool zcopy;
	int i;

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		zcopy = vhost_net_zcopy_mask & (0x1 << i);
		if (!zcopy)
			continue;
		n->vqs[i].ubuf_info = kmalloc(sizeof(*n->vqs[i].ubuf_info) *
					      UIO_MAXIOV, GFP_KERNEL);
		if (!n->vqs[i].ubuf_info)
			goto err;
	}
	return 0;

err:
	vhost_net_clear_ubuf_info(n);
	return -ENOMEM;
}

static void vhost_net_vq_reset(struct vhost_net *n)
{
	int i;

	vhost_net_clear_ubuf_info(n);

	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].done_idx = 0;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].ubufs = NULL;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
	}
}

static void vhost_net_tx_packet(struct vhost_net *net)
{
	++net->tx_packets;
	if (net->tx_packets < 1024)
		return;
	net->tx_packets = 0;
	net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
	++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
	/* TX flush waits for outstanding DMAs to be done.
	 * Don't start new DMAs.
	 */
	return !net->tx_flush &&
		net->tx_packets / 64 >= net->tx_zcopy_err;
}

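/*
 * Editor's worked example (illustrative): the heuristic above keeps
 * zerocopy enabled while errors stay at or below roughly 1/64 of recent
 * packets. With tx_packets = 640 and tx_zcopy_err = 10, 640 / 64 = 10 >= 10
 * holds and zerocopy stays on; an 11th error would disable it until
 * vhost_net_tx_packet() resets both counters after 1024 packets.
 */
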
static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

/* Pop first len bytes from iovec. Return number of segments used. */
static int move_iovec_hdr(struct iovec *from, struct iovec *to,
			  size_t len, int iov_count)
{
	int seg = 0;
	size_t size;

	while (len && seg < iov_count) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		from->iov_len -= size;
		from->iov_base += size;
		len -= size;
		++from;
		++to;
		++seg;
	}
	return seg;
}

/* Copy iovec entries covering the first len bytes, without consuming
 * the source iovec. */
static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
			   size_t len, int iovcount)
{
	int seg = 0;
	size_t size;

	while (len && seg < iovcount) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		len -= size;
		++from;
		++to;
		++seg;
	}
}

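/*
 * Editor's worked example (illustrative, compiled out): popping a
 * 12-byte virtio header from iov = { {a, 4}, {b, 100} } with
 * move_iovec_hdr() yields to = { {a, 4}, {b, 8} } and returns 2 segments,
 * leaving from = { {a+4, 0}, {b+8, 92} }; copy_iovec_hdr() would produce
 * the same "to" entries but leave "from" untouched.
 */
#if 0
static void example_move_iovec_hdr(void)
{
	char a[4], b[100];
	struct iovec from[2] = { { a, 4 }, { b, 100 } };
	struct iovec to[2];
	int seg = move_iovec_hdr(from, to, 12, 2);
	/* seg == 2; to[1].iov_len == 8; from[1].iov_len == 92 */
}
#endif
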
/* The lower device may complete DMAs out of order. upend_idx tracks the
 * tail of the outstanding zerocopy entries and done_idx tracks the head.
 * Once the DMAs starting at done_idx have completed contiguously, we
 * signal the corresponding used entries to the guest.
 */
static int vhost_zerocopy_signal_used(struct vhost_net *net,
				      struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	int i;
	int j = 0;

	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
			vhost_net_tx_err(net);
		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			vhost_add_used_and_signal(vq->dev, vq,
						  vq->heads[i].id, 0);
			++j;
		} else
			break;
	}
	if (j)
		nvq->done_idx = i;
	return j;
}

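/*
 * Editor's illustrative sketch (compiled out): done_idx..upend_idx is a
 * window on a ring of UIO_MAXIOV slots, so the number of outstanding
 * zerocopy buffers is the wrapped distance between them. handle_tx()
 * computes the same value inline when it checks VHOST_MAX_PEND; the
 * hypothetical helper below is just that arithmetic made explicit.
 */
#if 0
static int example_outstanding_zcopy(int upend_idx, int done_idx)
{
	/* e.g. done_idx = 1022, upend_idx = 3, UIO_MAXIOV = 1024:
	 * (3 + 1024 - 1022) % 1024 = 5 buffers still in flight */
	return (upend_idx + UIO_MAXIOV - done_idx) % UIO_MAXIOV;
}
#endif
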
static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt = atomic_read(&ubufs->kref.refcount);

	/*
	 * Trigger polling thread if guest stopped submitting new buffers:
	 * in this case, the refcount after decrement will eventually reach 1
	 * so here it is 2.
	 * We also trigger polling periodically after each 16 packets
	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
	 * less than 10% of times).
	 */
	if (cnt <= 2 || !(cnt % 16))
		vhost_poll_queue(&vq->poll);
	/* set len to mark this descriptor's buffers as DMA done */
	vq->heads[ubuf->desc].len = success ?
		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
	vhost_net_ubuf_put(ubufs);
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in, s;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err;
	size_t hdr_size;
	struct socket *sock;
	struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
	bool zcopy, zcopy_used;

	/* TODO: check that we are running from vhost_worker? */
	sock = rcu_dereference_check(vq->private_data, 1);
	if (!sock)
		return;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&net->dev, vq);

	hdr_size = nvq->vhost_hlen;
	zcopy = nvq->ubufs;

	for (;;) {
		/* Release DMA-done buffers first */
		if (zcopy)
			vhost_zerocopy_signal_used(net, vq);

		head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			int num_pends;

			/* If more outstanding DMAs, queue the work.
			 * Handle upend_idx wrap around.
			 */
			num_pends = likely(nvq->upend_idx >= nvq->done_idx) ?
				    (nvq->upend_idx - nvq->done_idx) :
				    (nvq->upend_idx + UIO_MAXIOV -
				     nvq->done_idx);
			if (unlikely(num_pends > VHOST_MAX_PEND))
				break;
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		/* Skip header. TODO: support TSO. */
		s = move_iovec_hdr(vq->iov, nvq->hdr, hdr_size, out);
		msg.msg_iovlen = out;
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected header len for TX: "
			       "%zd expected %zd\n",
			       iov_length(nvq->hdr, s), hdr_size);
			break;
		}
		zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
				       nvq->upend_idx != nvq->done_idx);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			vq->heads[nvq->upend_idx].id = head;
			if (!vhost_net_tx_select_zcopy(net) ||
			    len < VHOST_GOODCOPY_LEN) {
				/* copy path: no need to wait for DMA done */
				vq->heads[nvq->upend_idx].len =
							VHOST_DMA_DONE_LEN;
				msg.msg_control = NULL;
				msg.msg_controllen = 0;
				ubufs = NULL;
			} else {
				struct ubuf_info *ubuf;
				ubuf = nvq->ubuf_info + nvq->upend_idx;

				vq->heads[nvq->upend_idx].len =
					VHOST_DMA_IN_PROGRESS;
				ubuf->callback = vhost_zerocopy_callback;
				ubuf->ctx = nvq->ubufs;
				ubuf->desc = nvq->upend_idx;
				msg.msg_control = ubuf;
				msg.msg_controllen = sizeof(ubuf);
				ubufs = nvq->ubufs;
				kref_get(&ubufs->kref);
			}
			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
		} else
			msg.msg_control = NULL;
		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(NULL, sock, &msg, len);
		if (unlikely(err < 0)) {
			if (zcopy_used) {
				if (ubufs)
					vhost_net_ubuf_put(ubufs);
				nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
					% UIO_MAXIOV;
			}
			vhost_discard_vq_desc(vq, 1);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		total_len += len;
		vhost_net_tx_packet(net);
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}

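/*
 * Editor's illustrative sketch (compiled out): the TX path picks one of
 * three modes per packet. The hypothetical helper below restates the
 * decision made inline in handle_tx() above.
 */
#if 0
static int example_tx_mode(struct vhost_net *net,
			   struct vhost_net_virtqueue *nvq,
			   size_t len, bool zcopy)
{
	bool zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
				    nvq->upend_idx != nvq->done_idx);
	if (!zcopy_used)
		return 0;	/* plain copy, completed at sendmsg() return */
	if (!vhost_net_tx_select_zcopy(net) || len < VHOST_GOODCOPY_LEN)
		return 1;	/* zcopy slot used, but data still copied */
	return 2;		/* true zerocopy: wait for the DMA callback */
}
#endif
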
static int peek_head_len(struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (vlan_tx_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}

/* This is a multi-buffer version of vhost_get_vq_desc(), that works if
 *	vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- returned count of log entries used
 * @quota       - headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
				"out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = d;
		heads[headcount].len = iov_length(vq->iov + seg, in);
		datalen -= heads[headcount].len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len += datalen;
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}

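/*
 * Editor's worked example (illustrative): get_rx_bufs() keeps taking
 * buffer heads until datalen is covered, then trims the last head's
 * recorded len by the (non-positive) leftover. For datalen = 1500 and
 * heads of 600 + 600 + 600 bytes: after three iterations datalen is
 * 1500 - 1800 = -300, so heads[2].len becomes 600 + (-300) = 300 and the
 * used ring reports exactly the bytes that will be written.
 */
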
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned uninitialized_var(in), log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr_mrg_rxbuf hdr = {
		.hdr.flags = 0,
		.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, mergeable;
	s16 headcount;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	/* TODO: check that we are running from vhost_worker? */
	struct socket *sock = rcu_dereference_check(vq->private_data, 1);

	if (!sock)
		return;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&net->dev, vq);
	vhost_hlen = nvq->vhost_hlen;
	sock_hlen = nvq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);

	while ((sock_len = peek_head_len(sock->sk))) {
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
					&in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			break;
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new?  Wait for eventfd to tell us
			 * they refilled. */
			break;
		}
		/* We don't need to be notified again. */
		if (unlikely(vhost_hlen))
			/* Skip header. TODO: support TSO. */
			move_iovec_hdr(vq->iov, nvq->hdr, vhost_hlen, in);
		else
			/* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
			 * needed because recvmsg can modify msg_iov. */
			copy_iovec_hdr(vq->iov, nvq->hdr, sock_hlen, in);
		msg.msg_iovlen = in;
		err = sock->ops->recvmsg(NULL, sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: "
				 " len %d, expected %zd\n", err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		if (unlikely(vhost_hlen) &&
		    memcpy_toiovecend(nvq->hdr, (unsigned char *)&hdr, 0,
				      vhost_hlen)) {
			vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
			       vq->iov->iov_base);
			break;
		}
		/* TODO: Should check and handle checksum. */
		if (likely(mergeable) &&
		    memcpy_toiovecend(nvq->hdr, (unsigned char *)&headcount,
				      offsetof(typeof(hdr), num_buffers),
				      sizeof hdr.num_buffers)) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			break;
		}
		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
					    headcount);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len);
		total_len += vhost_len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}

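/*
 * Editor's note (illustrative): with VIRTIO_NET_F_MRG_RXBUF the vnet
 * header is struct virtio_net_hdr (10 bytes) followed by a 16-bit
 * num_buffers field at offset 10, for 12 bytes total. handle_rx() patches
 * only that field after recvmsg(), writing the count of merged buffer
 * heads at offsetof(struct virtio_net_hdr_mrg_rxbuf, num_buffers).
 */
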
static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}

static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int r, i;

	if (!n)
		return -ENOMEM;
	vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kfree(n);
		return -ENOMEM;
	}

	dev = &n->dev;
	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].ubufs = NULL;
		n->vqs[i].ubuf_info = NULL;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].done_idx = 0;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
	}
	r = vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX);
	if (r < 0) {
		kfree(n);
		kfree(vqs);
		return r;
	}

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);

	f->private_data = n;

	return 0;
}

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	if (!vq->private_data)
		return;
	vhost_poll_stop(poll);
}

static int vhost_net_enable_vq(struct vhost_net *n,
				struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	struct socket *sock;

	sock = rcu_dereference_protected(vq->private_data,
					 lockdep_is_held(&vq->mutex));
	if (!sock)
		return 0;

	return vhost_poll_start(poll, sock->file);
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;

	mutex_lock(&vq->mutex);
	sock = rcu_dereference_protected(vq->private_data,
					 lockdep_is_held(&vq->mutex));
	vhost_net_disable_vq(n, vq);
	rcu_assign_pointer(vq->private_data, NULL);
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
	vhost_poll_flush(n->poll + index);
	vhost_poll_flush(&n->vqs[index].vq.poll);
}

static void vhost_net_flush(struct vhost_net *n)
{
	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = true;
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		/* Wait for all lower device DMAs to finish. */
		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = false;
		kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref);
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
	}
}

static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev, false);
	vhost_net_vq_reset(n);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n->dev.vqs);
	kfree(n);
	return 0;
}

static struct socket *get_raw_socket(int fd)
{
	struct {
		struct sockaddr_ll sa;
		char  buf[MAX_ADDR_LEN];
	} uaddr;
	int uaddr_len = sizeof uaddr, r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
			       &uaddr_len, 0);
	if (r)
		goto err;

	if (uaddr.sa.sll_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	fput(sock->file);
	return ERR_PTR(r);
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = macvtap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}

static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	struct vhost_net_virtqueue *nvq;
	struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = &n->vqs[index].vq;
	nvq = &n->vqs[index];
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = rcu_dereference_protected(vq->private_data,
					    lockdep_is_held(&vq->mutex));
	if (sock != oldsock) {
		ubufs = vhost_net_ubuf_alloc(vq,
					     sock && vhost_sock_zcopy(sock));
		if (IS_ERR(ubufs)) {
			r = PTR_ERR(ubufs);
			goto err_ubufs;
		}

		vhost_net_disable_vq(n, vq);
		rcu_assign_pointer(vq->private_data, sock);
		r = vhost_init_used(vq);
		if (r)
			goto err_used;
		r = vhost_net_enable_vq(n, vq);
		if (r)
			goto err_used;

		oldubufs = nvq->ubufs;
		nvq->ubufs = ubufs;

		n->tx_packets = 0;
		n->tx_zcopy_err = 0;
		n->tx_flush = false;
	}

	mutex_unlock(&vq->mutex);

	if (oldubufs) {
		vhost_net_ubuf_put_wait_and_free(oldubufs);
		mutex_lock(&vq->mutex);
		vhost_zerocopy_signal_used(n, vq);
		mutex_unlock(&vq->mutex);
	}

	if (oldsock) {
		vhost_net_flush_vq(n, index);
		fput(oldsock->file);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_used:
	rcu_assign_pointer(vq->private_data, oldsock);
	vhost_net_enable_vq(n, vq);
	if (ubufs)
		vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
	fput(sock->file);
err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;
	struct vhost_memory *memory;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	memory = vhost_dev_reset_owner_prepare();
	if (!memory) {
		err = -ENOMEM;
		goto done;
	}
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_reset_owner(&n->dev, memory);
	vhost_net_vq_reset(n);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	return err;
}

static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	n->dev.acked_features = features;
	smp_wmb();
	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].vq.mutex);
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].vq.mutex);
	}
	vhost_net_flush(n);
	mutex_unlock(&n->dev.mutex);
	return 0;
}

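/*
 * Editor's note (illustrative): hdr_len is 12 bytes with
 * VIRTIO_NET_F_MRG_RXBUF (struct virtio_net_hdr_mrg_rxbuf) and 10 bytes
 * without (struct virtio_net_hdr). With VHOST_NET_F_VIRTIO_NET_HDR the
 * header is synthesized/stripped here (vhost_hlen = hdr_len,
 * sock_hlen = 0); otherwise the tap/macvtap socket is expected to carry
 * it inline (vhost_hlen = 0, sock_hlen = hdr_len).
 */
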
static long vhost_net_set_owner(struct vhost_net *n)
{
	int r;

	mutex_lock(&n->dev.mutex);
	if (vhost_dev_has_owner(&n->dev)) {
		r = -EBUSY;
		goto out;
	}
	r = vhost_net_set_ubuf_info(n);
	if (r)
		goto out;
	r = vhost_dev_set_owner(&n->dev);
	if (r)
		vhost_net_clear_ubuf_info(n);
	vhost_net_flush(n);
out:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_NET_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_NET_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	case VHOST_SET_OWNER:
		return vhost_net_set_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		else
			vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

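/*
 * Editor's minimal userspace sketch (illustrative, compiled out): the
 * usual setup sequence for this ioctl interface, assuming "tap_fd" is an
 * already-open tap (or macvtap/packet) descriptor. Error handling and the
 * memory-table/vring setup are omitted for brevity.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int example_setup_vhost_net(int tap_fd)
{
	struct vhost_vring_file backend;
	__u64 features;
	int vhost_fd = open("/dev/vhost-net", O_RDWR);

	ioctl(vhost_fd, VHOST_SET_OWNER, NULL);	/* bind device to this process */
	ioctl(vhost_fd, VHOST_GET_FEATURES, &features);
	ioctl(vhost_fd, VHOST_SET_FEATURES, &features);
	/* ... VHOST_SET_MEM_TABLE and per-ring VHOST_SET_VRING_* calls
	 * would go here before the backend is attached ... */
	backend.index = 0;	/* VHOST_NET_VQ_RX */
	backend.fd = tap_fd;
	ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &backend);
	backend.index = 1;	/* VHOST_NET_VQ_TX */
	ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &backend);
	return vhost_fd;
}
#endif
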
#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
				   unsigned long arg)
{
	return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_net_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_net_release,
	.unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_net_compat_ioctl,
#endif
	.open           = vhost_net_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_net_misc = {
	.minor = VHOST_NET_MINOR,
	.name = "vhost-net",
	.fops = &vhost_net_fops,
};

static int vhost_net_init(void)
{
	if (experimental_zcopytx)
		vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");