// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_tap.h>
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/skbuff.h>

#include <net/sock.h>
#include <net/xdp.h>

#include "vhost.h"

static int experimental_zcopytx;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
		                       " 1 - Enable; 0 - Disable");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* Max number of packets transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others with small
 * pkts.
 */
#define VHOST_NET_PKT_WEIGHT 256

/* Max number of TX used buffers for outstanding zerocopy */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256

/*
 * For transmit, the used buffer len is not needed, so we repurpose it to
 * track buffer status internally; this is used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN	((__force __virtio32)3)
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN	((__force __virtio32)2)
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS	((__force __virtio32)1)
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN	((__force __virtio32)0)

#define VHOST_DMA_IS_DONE(len) ((__force u32)(len) >= (__force u32)VHOST_DMA_DONE_LEN)
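/* Since FAILED (3) >= DONE (2), VHOST_DMA_IS_DONE() is true for both
 * successful and failed DMA: failed buffers are still recycled by
 * vhost_zerocopy_signal_used(), and the FAILED value additionally feeds
 * the error counter via vhost_net_tx_err().
 */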

enum {
	VHOST_NET_FEATURES = VHOST_FEATURES |
			 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
			 (1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			 (1ULL << VIRTIO_F_ACCESS_PLATFORM) |
			 (1ULL << VIRTIO_F_RING_RESET)
};

enum {
	VHOST_NET_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
};

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

struct vhost_net_ubuf_ref {
	/* refcount follows semantics similar to kref:
	 *  0: object is released
	 *  1: no outstanding ubufs
	 * >1: outstanding ubufs
	 */
	atomic_t refcount;
	wait_queue_head_t wait;
	struct vhost_virtqueue *vq;
};

#define VHOST_NET_BATCH 64
struct vhost_net_buf {
	void **queue;
	int tail;
	int head;
};

struct vhost_net_virtqueue {
	struct vhost_virtqueue vq;
	size_t vhost_hlen;
	size_t sock_hlen;
	/* vhost zerocopy support fields below: */
	/* last used idx for outstanding DMA zerocopy buffers */
	int upend_idx;
	/* For TX, first used idx for DMA done zerocopy buffers
	 * For RX, number of batched heads
	 */
	int done_idx;
	/* Number of XDP frames batched */
	int batched_xdp;
	/* an array of userspace buffers info */
	struct ubuf_info_msgzc *ubuf_info;
	/* Reference counting for outstanding ubufs.
	 * Protected by vq mutex. Writers must also take device mutex. */
	struct vhost_net_ubuf_ref *ubufs;
	struct ptr_ring *rx_ring;
	struct vhost_net_buf rxq;
	/* Batched XDP buffs */
	struct xdp_buff *xdp;
};

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_net_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Number of TX recently submitted.
	 * Protected by tx vq lock. */
	unsigned tx_packets;
	/* Number of times zerocopy TX recently failed.
	 * Protected by tx vq lock. */
	unsigned tx_zcopy_err;
	/* Flush in progress. Protected by tx vq lock. */
	bool tx_flush;
	/* Private page frag cache */
	struct page_frag_cache pf_cache;
};

static unsigned vhost_net_zcopy_mask __read_mostly;

static void *vhost_net_buf_get_ptr(struct vhost_net_buf *rxq)
{
	if (rxq->tail != rxq->head)
		return rxq->queue[rxq->head];
	else
		return NULL;
}

static int vhost_net_buf_get_size(struct vhost_net_buf *rxq)
{
	return rxq->tail - rxq->head;
}

static int vhost_net_buf_is_empty(struct vhost_net_buf *rxq)
{
	return rxq->tail == rxq->head;
}

static void *vhost_net_buf_consume(struct vhost_net_buf *rxq)
{
	void *ret = vhost_net_buf_get_ptr(rxq);

	++rxq->head;
	return ret;
}

static int vhost_net_buf_produce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	rxq->head = 0;
	rxq->tail = ptr_ring_consume_batched(nvq->rx_ring, rxq->queue,
					      VHOST_NET_BATCH);
	return rxq->tail;
}

static void vhost_net_buf_unproduce(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (nvq->rx_ring && !vhost_net_buf_is_empty(rxq)) {
		ptr_ring_unconsume(nvq->rx_ring, rxq->queue + rxq->head,
				   vhost_net_buf_get_size(rxq),
				   tun_ptr_free);
		rxq->head = rxq->tail = 0;
	}
}

static int vhost_net_buf_peek_len(void *ptr)
{
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		return xdpf->len;
	}

	return __skb_array_len_with_tag(ptr);
}

static int vhost_net_buf_peek(struct vhost_net_virtqueue *nvq)
{
	struct vhost_net_buf *rxq = &nvq->rxq;

	if (!vhost_net_buf_is_empty(rxq))
		goto out;

	if (!vhost_net_buf_produce(nvq))
		return 0;

out:
	return vhost_net_buf_peek_len(vhost_net_buf_get_ptr(rxq));
}

static void vhost_net_buf_init(struct vhost_net_buf *rxq)
{
	rxq->head = rxq->tail = 0;
}

static void vhost_net_enable_zcopy(int vq)
{
	vhost_net_zcopy_mask |= 0x1 << vq;
}

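/* Lifecycle of the ubuf refcount, tying together the helpers below:
 * vhost_net_ubuf_alloc() starts the count at 1 (no outstanding ubufs);
 * handle_tx_zerocopy() takes one reference per submitted zerocopy buffer;
 * vhost_zerocopy_complete() drops it once the lower device finishes DMA;
 * vhost_net_ubuf_put_and_wait() drops the initial reference and sleeps
 * until the count reaches 0, i.e. all in-flight DMAs have completed.
 */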
232 static struct vhost_net_ubuf_ref *
233 vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
234 {
235 	struct vhost_net_ubuf_ref *ubufs;
236 	/* No zero copy backend? Nothing to count. */
237 	if (!zcopy)
238 		return NULL;
239 	ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
240 	if (!ubufs)
241 		return ERR_PTR(-ENOMEM);
242 	atomic_set(&ubufs->refcount, 1);
243 	init_waitqueue_head(&ubufs->wait);
244 	ubufs->vq = vq;
245 	return ubufs;
246 }
247 
248 static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
249 {
250 	int r = atomic_sub_return(1, &ubufs->refcount);
251 	if (unlikely(!r))
252 		wake_up(&ubufs->wait);
253 	return r;
254 }
255 
256 static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
257 {
258 	vhost_net_ubuf_put(ubufs);
259 	wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
260 }
261 
262 static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
263 {
264 	vhost_net_ubuf_put_and_wait(ubufs);
265 	kfree(ubufs);
266 }
267 
268 static void vhost_net_clear_ubuf_info(struct vhost_net *n)
269 {
270 	int i;
271 
272 	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
273 		kfree(n->vqs[i].ubuf_info);
274 		n->vqs[i].ubuf_info = NULL;
275 	}
276 }
277 
278 static int vhost_net_set_ubuf_info(struct vhost_net *n)
279 {
280 	bool zcopy;
281 	int i;
282 
283 	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
284 		zcopy = vhost_net_zcopy_mask & (0x1 << i);
285 		if (!zcopy)
286 			continue;
287 		n->vqs[i].ubuf_info =
288 			kmalloc_array(UIO_MAXIOV,
289 				      sizeof(*n->vqs[i].ubuf_info),
290 				      GFP_KERNEL);
		if (!n->vqs[i].ubuf_info)
			goto err;
	}
	return 0;

err:
	vhost_net_clear_ubuf_info(n);
	return -ENOMEM;
}

static void vhost_net_vq_reset(struct vhost_net *n)
{
	int i;

	vhost_net_clear_ubuf_info(n);

	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].done_idx = 0;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].ubufs = NULL;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
}

static void vhost_net_tx_packet(struct vhost_net *net)
{
	++net->tx_packets;
	if (net->tx_packets < 1024)
		return;
	net->tx_packets = 0;
	net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
	++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
	/* TX flush waits for outstanding DMAs to be done.
	 * Don't start new DMAs.
	 */
	return !net->tx_flush &&
		net->tx_packets / 64 >= net->tx_zcopy_err;
}

static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

static bool vhost_sock_xdp(struct socket *sock)
{
	return sock_flag(sock->sk, SOCK_XDP);
}

/* The lower device driver may complete DMAs out of order. upend_idx tracks
 * the end of the used idx, done_idx tracks the head. Once the lower device
 * has completed DMAs contiguously from the head, we signal the used idx to
 * the guest.
 */
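/* For example, with UIO_MAXIOV == 1024, done_idx == 1023 and
 * upend_idx == 2, entries 1023, 0 and 1 are outstanding. Once all three
 * complete, the first loop below counts j == 3, and the used heads are
 * reported in two wrap-safe chunks: one entry at 1023, then two at 0.
 */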
static void vhost_zerocopy_signal_used(struct vhost_net *net,
				       struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	int i, add;
	int j = 0;

	for (i = nvq->done_idx; i != nvq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
			vhost_net_tx_err(net);
		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			++j;
		} else {
			break;
		}
	}
	while (j) {
		add = min(UIO_MAXIOV - nvq->done_idx, j);
		vhost_add_used_and_signal_n(vq->dev, vq,
					    &vq->heads[nvq->done_idx], add);
		nvq->done_idx = (nvq->done_idx + add) % UIO_MAXIOV;
		j -= add;
	}
}

static void vhost_zerocopy_complete(struct sk_buff *skb,
				    struct ubuf_info *ubuf_base, bool success)
{
	struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
	struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt;

	rcu_read_lock_bh();

	/* Set len to mark this descriptor's buffers as having completed DMA */
	vq->heads[ubuf->desc].len = success ?
		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
	cnt = vhost_net_ubuf_put(ubufs);

	/*
	 * Trigger polling thread if guest stopped submitting new buffers:
	 * in this case, the refcount after decrement will eventually reach 1.
	 * We also trigger polling periodically after each 16 packets
	 * (the value 16 here is more or less arbitrary, it's tuned to trigger
	 * less than 10% of the time).
	 */
	if (cnt <= 1 || !(cnt % 16))
		vhost_poll_queue(&vq->poll);

	rcu_read_unlock_bh();
}

static const struct ubuf_info_ops vhost_ubuf_ops = {
	.complete = vhost_zerocopy_complete,
};

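/* local_clock() returns nanoseconds; shifting right by 10 divides by 1024,
 * which cheaply approximates microseconds (off by ~2.4%), so the
 * busyloop_timeout values compared against this clock are treated as
 * (approximate) microseconds.
 */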
static inline unsigned long busy_clock(void)
{
	return local_clock() >> 10;
}

static bool vhost_can_busy_poll(unsigned long endtime)
{
	return likely(!need_resched() && !time_after(busy_clock(), endtime) &&
		      !signal_pending(current));
}

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);

	if (!vhost_vq_get_backend(vq))
		return;
	vhost_poll_stop(poll);
}

static int vhost_net_enable_vq(struct vhost_net *n,
				struct vhost_virtqueue *vq)
{
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);
	struct vhost_poll *poll = n->poll + (nvq - n->vqs);
	struct socket *sock;

	sock = vhost_vq_get_backend(vq);
	if (!sock)
		return 0;

	return vhost_poll_start(poll, sock->file);
}

static void vhost_net_signal_used(struct vhost_net_virtqueue *nvq)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_dev *dev = vq->dev;

	if (!nvq->done_idx)
		return;

	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
	nvq->done_idx = 0;
}

static void vhost_tx_batch(struct vhost_net *net,
			   struct vhost_net_virtqueue *nvq,
			   struct socket *sock,
			   struct msghdr *msghdr)
{
	struct tun_msg_ctl ctl = {
		.type = TUN_MSG_PTR,
		.num = nvq->batched_xdp,
		.ptr = nvq->xdp,
	};
	int i, err;

	if (nvq->batched_xdp == 0)
		goto signal_used;

	msghdr->msg_control = &ctl;
	msghdr->msg_controllen = sizeof(ctl);
	err = sock->ops->sendmsg(sock, msghdr, 0);
	if (unlikely(err < 0)) {
		vq_err(&nvq->vq, "Failed to batch send packets\n");

		/* free pages owned by XDP; since this is an unlikely error path,
		 * keep it simple and avoid more complex bulk update for the
		 * used pages
		 */
		for (i = 0; i < nvq->batched_xdp; ++i)
			put_page(virt_to_head_page(nvq->xdp[i].data));
		nvq->batched_xdp = 0;
		nvq->done_idx = 0;
		return;
	}

signal_used:
	vhost_net_signal_used(nvq);
	nvq->batched_xdp = 0;
}

static int sock_has_rx_data(struct socket *sock)
{
	if (unlikely(!sock))
		return 0;

	if (sock->ops->peek_len)
		return sock->ops->peek_len(sock);

	return !skb_queue_empty(&sock->sk->sk_receive_queue);
}

static void vhost_net_busy_poll_try_queue(struct vhost_net *net,
					  struct vhost_virtqueue *vq)
{
	if (!vhost_vq_avail_empty(&net->dev, vq)) {
		vhost_poll_queue(&vq->poll);
	} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
		vhost_disable_notify(&net->dev, vq);
		vhost_poll_queue(&vq->poll);
	}
}

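/* Busy poll on behalf of one direction while holding the *paired*
 * virtqueue: when poll_rx is true we are servicing RX, so vq below is the
 * TX queue (and vice versa). The trylock keeps this deadlock-free against
 * a concurrent handler that already holds the paired vq mutex.
 */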
static void vhost_net_busy_poll(struct vhost_net *net,
				struct vhost_virtqueue *rvq,
				struct vhost_virtqueue *tvq,
				bool *busyloop_intr,
				bool poll_rx)
{
	unsigned long busyloop_timeout;
	unsigned long endtime;
	struct socket *sock;
	struct vhost_virtqueue *vq = poll_rx ? tvq : rvq;

	/* Try to hold the vq mutex of the paired virtqueue. We can't
	 * use mutex_lock() here since we could not guarantee a
	 * consistent lock ordering.
	 */
	if (!mutex_trylock(&vq->mutex))
		return;

	vhost_disable_notify(&net->dev, vq);
	sock = vhost_vq_get_backend(rvq);

	busyloop_timeout = poll_rx ? rvq->busyloop_timeout :
				     tvq->busyloop_timeout;

	preempt_disable();
	endtime = busy_clock() + busyloop_timeout;

	while (vhost_can_busy_poll(endtime)) {
		if (vhost_vq_has_work(vq)) {
			*busyloop_intr = true;
			break;
		}

		if ((sock_has_rx_data(sock) &&
		     !vhost_vq_avail_empty(&net->dev, rvq)) ||
		    !vhost_vq_avail_empty(&net->dev, tvq))
			break;

		cpu_relax();
	}

	preempt_enable();

	if (poll_rx || sock_has_rx_data(sock))
		vhost_net_busy_poll_try_queue(net, vq);
	else /* On tx here, sock has no rx data. */
		vhost_enable_notify(&net->dev, rvq);

	mutex_unlock(&vq->mutex);
}

static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
				    struct vhost_net_virtqueue *tnvq,
				    unsigned int *out_num, unsigned int *in_num,
				    struct msghdr *msghdr, bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;

	int r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
				  out_num, in_num, NULL, NULL);

	if (r == tvq->num && tvq->busyloop_timeout) {
		/* Flush batched packets first */
		if (!vhost_sock_zcopy(vhost_vq_get_backend(tvq)))
			vhost_tx_batch(net, tnvq,
				       vhost_vq_get_backend(tvq),
				       msghdr);

		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, false);

		r = vhost_get_vq_desc(tvq, tvq->iov, ARRAY_SIZE(tvq->iov),
				      out_num, in_num, NULL, NULL);
	}

	return r;
}

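/* Cap the number of outstanding zerocopy TX buffers. The window size is
 * (upend_idx - done_idx) mod UIO_MAXIOV; it may not exceed the smaller of
 * VHOST_MAX_PEND and a quarter of the ring. E.g. with vq->num == 256 the
 * limit is min(128, 64) == 64 pending buffers.
 */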
static bool vhost_exceeds_maxpend(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;

	return (nvq->upend_idx + UIO_MAXIOV - nvq->done_idx) % UIO_MAXIOV >
	       min_t(unsigned int, VHOST_MAX_PEND, vq->num >> 2);
}

static size_t init_iov_iter(struct vhost_virtqueue *vq, struct iov_iter *iter,
			    size_t hdr_size, int out)
{
	/* Skip header. TODO: support TSO. */
	size_t len = iov_length(vq->iov, out);

	iov_iter_init(iter, ITER_SOURCE, vq->iov, out, len);
	iov_iter_advance(iter, hdr_size);

	return iov_iter_count(iter);
}

static int get_tx_bufs(struct vhost_net *net,
		       struct vhost_net_virtqueue *nvq,
		       struct msghdr *msg,
		       unsigned int *out, unsigned int *in,
		       size_t *len, bool *busyloop_intr)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	int ret;

	ret = vhost_net_tx_get_vq_desc(net, nvq, out, in, msg, busyloop_intr);

	if (ret < 0 || ret == vq->num)
		return ret;

	if (*in) {
		vq_err(vq, "Unexpected descriptor format for TX: out %d, in %d\n",
			*out, *in);
		return -EFAULT;
	}

	/* Sanity check */
	*len = init_iov_iter(vq, &msg->msg_iter, nvq->vhost_hlen, *out);
	if (*len == 0) {
		vq_err(vq, "Unexpected header len for TX: %zd expected %zd\n",
			*len, nvq->vhost_hlen);
		return -EFAULT;
	}

	return ret;
}

static bool tx_can_batch(struct vhost_virtqueue *vq, size_t total_len)
{
	return total_len < VHOST_NET_WEIGHT &&
	       !vhost_vq_avail_empty(vq->dev, vq);
}

#define VHOST_NET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

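/* Build one batched XDP buff from a TX descriptor. Resulting page-frag
 * layout (pad == SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + sock_hlen)):
 *
 *   buf                  vnet header, copied here by the final memcpy()
 *   buf+pad-sock_hlen    vnet header + packet, as copied from the guest
 *   buf+pad              packet data, where xdp->data will point
 *   buf+pad+datalen      tailroom for struct skb_shared_info
 */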
static int vhost_net_build_xdp(struct vhost_net_virtqueue *nvq,
			       struct iov_iter *from)
{
	struct vhost_virtqueue *vq = &nvq->vq;
	struct vhost_net *net = container_of(vq->dev, struct vhost_net,
					     dev);
	struct socket *sock = vhost_vq_get_backend(vq);
	struct virtio_net_hdr *gso;
	struct xdp_buff *xdp = &nvq->xdp[nvq->batched_xdp];
	size_t len = iov_iter_count(from);
	int headroom = vhost_sock_xdp(sock) ? XDP_PACKET_HEADROOM : 0;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	int pad = SKB_DATA_ALIGN(VHOST_NET_RX_PAD + headroom + nvq->sock_hlen);
	int sock_hlen = nvq->sock_hlen;
	void *buf;
	int copied;
	int ret;

	if (unlikely(len < nvq->sock_hlen))
		return -EFAULT;

	if (SKB_DATA_ALIGN(len + pad) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return -ENOSPC;

	buflen += SKB_DATA_ALIGN(len + pad);
	buf = page_frag_alloc_align(&net->pf_cache, buflen, GFP_KERNEL,
				    SMP_CACHE_BYTES);
	if (unlikely(!buf))
		return -ENOMEM;

	copied = copy_from_iter(buf + pad - sock_hlen, len, from);
	if (copied != len) {
		ret = -EFAULT;
		goto err;
	}

	gso = buf + pad - sock_hlen;

	if (!sock_hlen)
		memset(buf, 0, pad);

	if ((gso->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
	    vhost16_to_cpu(vq, gso->csum_start) +
	    vhost16_to_cpu(vq, gso->csum_offset) + 2 >
	    vhost16_to_cpu(vq, gso->hdr_len)) {
		gso->hdr_len = cpu_to_vhost16(vq,
			       vhost16_to_cpu(vq, gso->csum_start) +
			       vhost16_to_cpu(vq, gso->csum_offset) + 2);

		if (vhost16_to_cpu(vq, gso->hdr_len) > len) {
			ret = -EINVAL;
			goto err;
		}
	}

	/* pad includes sock_hlen, so there is room to copy the vnet header
	 * to the front of the buffer.
	 */
	memcpy(buf, buf + pad - sock_hlen, sock_hlen);

	xdp_init_buff(xdp, buflen, NULL);
	xdp_prepare_buff(xdp, buf, pad, len - sock_hlen, true);

	++nvq->batched_xdp;

	return 0;

err:
	page_frag_free(buf);
	return ret;
}

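/* Copy-based TX path. Descriptors are turned into XDP buffs and batched
 * up to VHOST_NET_BATCH before a single sendmsg() with a TUN_MSG_PTR
 * control (see vhost_tx_batch() above); when a buff cannot be built
 * because the packet is too big (-ENOSPC), the batch is flushed and the
 * packet falls back to a plain sendmsg().
 */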
static void handle_tx_copy(struct vhost_net *net, struct socket *sock)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err;
	int sent_pkts = 0;
	bool sock_can_batch = (sock->sk->sk_sndbuf == INT_MAX);
	bool busyloop_intr;

	do {
		busyloop_intr = false;
		if (nvq->done_idx == VHOST_NET_BATCH)
			vhost_tx_batch(net, nvq, sock, &msg);

		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
				   &busyloop_intr);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			/* Kicks are disabled at this point, break loop and
			 * process any remaining batched packets. Queue will
			 * be re-enabled afterwards.
			 */
			break;
		}

		total_len += len;

		/* For simplicity, TX batching is only enabled if
		 * sndbuf is unlimited.
		 */
		if (sock_can_batch) {
			err = vhost_net_build_xdp(nvq, &msg.msg_iter);
			if (!err) {
				goto done;
			} else if (unlikely(err != -ENOSPC)) {
				vhost_tx_batch(net, nvq, sock, &msg);
				vhost_discard_vq_desc(vq, 1);
				vhost_net_enable_vq(net, vq);
				break;
			}

			/* We can't build XDP buff, go for single
			 * packet path but let's flush batched
			 * packets.
			 */
			vhost_tx_batch(net, nvq, sock, &msg);
			msg.msg_control = NULL;
		} else {
			if (tx_can_batch(vq, total_len))
				msg.msg_flags |= MSG_MORE;
			else
				msg.msg_flags &= ~MSG_MORE;
		}

		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			if (err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS) {
				vhost_discard_vq_desc(vq, 1);
				vhost_net_enable_vq(net, vq);
				break;
			}
			pr_debug("Failed to send packet: err %d\n", err);
		} else if (unlikely(err != len))
			pr_debug("Truncated TX packet: len %d != %zd\n",
				 err, len);
done:
		vq->heads[nvq->done_idx].id = cpu_to_vhost32(vq, head);
		vq->heads[nvq->done_idx].len = 0;
		++nvq->done_idx;
	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));

	/* Kicks are still disabled, dispatch any remaining batched msgs. */
	vhost_tx_batch(net, nvq, sock, &msg);

	if (unlikely(busyloop_intr))
		/* If interrupted while doing busy polling, requeue the
		 * handler so that handle_rx and other tasks waiting on
		 * this CPU are handled fairly.
		 */
		vhost_poll_queue(&vq->poll);
	else
		/* All of our work has been completed; however, before
		 * leaving the TX handler, do one last check for work,
		 * and requeue handler if necessary. If there is no work,
		 * queue will be reenabled.
		 */
		vhost_net_busy_poll_try_queue(net, vq);
}

static void handle_tx_zerocopy(struct vhost_net *net, struct socket *sock)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned out, in;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct tun_msg_ctl ctl;
	size_t len, total_len = 0;
	int err;
	struct vhost_net_ubuf_ref *ubufs;
	struct ubuf_info_msgzc *ubuf;
	bool zcopy_used;
	int sent_pkts = 0;

	do {
		bool busyloop_intr;

		/* Release DMAs done buffers first */
		vhost_zerocopy_signal_used(net, vq);

		busyloop_intr = false;
		head = get_tx_bufs(net, nvq, &msg, &out, &in, &len,
				   &busyloop_intr);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}

		zcopy_used = len >= VHOST_GOODCOPY_LEN &&
			     !vhost_exceeds_maxpend(net) &&
			     vhost_net_tx_select_zcopy(net);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			ubuf = nvq->ubuf_info + nvq->upend_idx;
			vq->heads[nvq->upend_idx].id = cpu_to_vhost32(vq, head);
			vq->heads[nvq->upend_idx].len = VHOST_DMA_IN_PROGRESS;
			ubuf->ctx = nvq->ubufs;
			ubuf->desc = nvq->upend_idx;
			ubuf->ubuf.ops = &vhost_ubuf_ops;
			ubuf->ubuf.flags = SKBFL_ZEROCOPY_FRAG;
			refcount_set(&ubuf->ubuf.refcnt, 1);
			msg.msg_control = &ctl;
			ctl.type = TUN_MSG_UBUF;
			ctl.ptr = &ubuf->ubuf;
			msg.msg_controllen = sizeof(ctl);
			ubufs = nvq->ubufs;
			atomic_inc(&ubufs->refcount);
			nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
		} else {
			msg.msg_control = NULL;
			ubufs = NULL;
		}
		total_len += len;
		if (tx_can_batch(vq, total_len) &&
		    likely(!vhost_exceeds_maxpend(net))) {
			msg.msg_flags |= MSG_MORE;
		} else {
			msg.msg_flags &= ~MSG_MORE;
		}

		err = sock->ops->sendmsg(sock, &msg, len);
		if (unlikely(err < 0)) {
			bool retry = err == -EAGAIN || err == -ENOMEM || err == -ENOBUFS;

			if (zcopy_used) {
				if (vq->heads[ubuf->desc].len == VHOST_DMA_IN_PROGRESS)
					vhost_net_ubuf_put(ubufs);
				if (retry)
					nvq->upend_idx = ((unsigned)nvq->upend_idx - 1)
						% UIO_MAXIOV;
				else
					vq->heads[ubuf->desc].len = VHOST_DMA_DONE_LEN;
			}
			if (retry) {
				vhost_discard_vq_desc(vq, 1);
				vhost_net_enable_vq(net, vq);
				break;
			}
			pr_debug("Failed to send packet: err %d\n", err);
		} else if (unlikely(err != len))
			pr_debug("Truncated TX packet: len %d != %zd\n",
				 err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		vhost_net_tx_packet(net);
	} while (likely(!vhost_exceeds_weight(vq, ++sent_pkts, total_len)));
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *vq = &nvq->vq;
	struct socket *sock;

	mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_TX);
	sock = vhost_vq_get_backend(vq);
	if (!sock)
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	if (vhost_sock_zcopy(sock))
		handle_tx_zerocopy(net, sock);
	else
		handle_tx_copy(net, sock);

out:
	mutex_unlock(&vq->mutex);
}

static int peek_head_len(struct vhost_net_virtqueue *rvq, struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	if (rvq->rx_ring)
		return vhost_net_buf_peek(rvq);

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
		if (skb_vlan_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}

static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk,
				      bool *busyloop_intr)
{
	struct vhost_net_virtqueue *rnvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_net_virtqueue *tnvq = &net->vqs[VHOST_NET_VQ_TX];
	struct vhost_virtqueue *rvq = &rnvq->vq;
	struct vhost_virtqueue *tvq = &tnvq->vq;
	int len = peek_head_len(rnvq, sk);

	if (!len && rvq->busyloop_timeout) {
		/* Flush batched heads first */
		vhost_net_signal_used(rnvq);
		/* Both tx vq and rx socket were polled here */
		vhost_net_busy_poll(net, rvq, tvq, busyloop_intr, true);

		len = peek_head_len(rnvq, sk);
	}

	return len;
}

/* This is a multi-buffer version of vhost_get_vq_desc(), that works if
 *	vq has writable (in) descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- log offset
 * @quota       - headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 */
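/* Example with mergeable buffers: datalen == 5000 and three 2048-byte
 * buffers yields headcount == 3; after the loop datalen == -1144, so the
 * "len + datalen" fixup below trims the last head to the 904 bytes
 * actually used.
 */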
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;
	/* len is always initialized before use since we are always called with
	 * datalen > 0.
	 */
	u32 len;

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		r = vhost_get_vq_desc(vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (unlikely(r < 0))
			goto err;

		d = r;
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "Unexpected descriptor format for RX: out %d, in %d\n",
			       out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = cpu_to_vhost32(vq, d);
		len = iov_length(vq->iov + seg, in);
		heads[headcount].len = cpu_to_vhost32(vq, len);
		datalen -= len;
		++headcount;
		seg += in;
	}
	heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;

	/* Detect overrun */
	if (unlikely(datalen > 0)) {
		r = UIO_MAXIOV + 1;
		goto err;
	}
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_RX];
	struct vhost_virtqueue *vq = &nvq->vq;
	unsigned in, log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr hdr = {
		.flags = 0,
		.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, mergeable;
	s16 headcount;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	bool busyloop_intr = false;
	bool set_num_buffers;
	struct socket *sock;
	struct iov_iter fixup;
	__virtio16 num_buffers;
	int recv_pkts = 0;

	mutex_lock_nested(&vq->mutex, VHOST_NET_VQ_RX);
	sock = vhost_vq_get_backend(vq);
	if (!sock)
		goto out;

	if (!vq_meta_prefetch(vq))
		goto out;

	vhost_disable_notify(&net->dev, vq);
	vhost_net_disable_vq(net, vq);

	vhost_hlen = nvq->vhost_hlen;
	sock_hlen = nvq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(vq, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(vq, VIRTIO_NET_F_MRG_RXBUF);
	set_num_buffers = mergeable ||
			  vhost_has_feature(vq, VIRTIO_F_VERSION_1);

	do {
		sock_len = vhost_net_rx_peek_head_len(net, sock->sk,
						      &busyloop_intr);
		if (!sock_len)
			break;
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
					vhost_len, &in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			goto out;
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(busyloop_intr)) {
				vhost_poll_queue(&vq->poll);
			} else if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new?  Wait for eventfd to tell us
			 * they refilled. */
			goto out;
		}
		busyloop_intr = false;
		if (nvq->rx_ring)
			msg.msg_control = vhost_net_buf_consume(&nvq->rxq);
		/* On overrun, truncate and discard */
		if (unlikely(headcount > UIO_MAXIOV)) {
			iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, 1, 1);
			err = sock->ops->recvmsg(sock, &msg,
						 1, MSG_DONTWAIT | MSG_TRUNC);
			pr_debug("Discarded rx packet: len %zd\n", sock_len);
			continue;
		}
		/* We don't need to be notified again. */
		iov_iter_init(&msg.msg_iter, ITER_DEST, vq->iov, in, vhost_len);
		fixup = msg.msg_iter;
		if (unlikely(vhost_hlen)) {
			/* We will supply the header ourselves
			 * TODO: support TSO.
			 */
			iov_iter_advance(&msg.msg_iter, vhost_hlen);
		}
		err = sock->ops->recvmsg(sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: len %d, expected %zd\n",
				 err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		/* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */
		if (unlikely(vhost_hlen)) {
			if (copy_to_iter(&hdr, sizeof(hdr),
					 &fixup) != sizeof(hdr)) {
				vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
				       vq->iov->iov_base);
				goto out;
			}
		} else {
			/* Header came from socket; we'll need to patch
			 * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF
			 */
			iov_iter_advance(&fixup, sizeof(hdr));
		}
		/* TODO: Should check and handle checksum. */

		num_buffers = cpu_to_vhost16(vq, headcount);
		if (likely(set_num_buffers) &&
		    copy_to_iter(&num_buffers, sizeof(num_buffers),
				 &fixup) != sizeof(num_buffers)) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			goto out;
		}
		nvq->done_idx += headcount;
		if (nvq->done_idx > VHOST_NET_BATCH)
			vhost_net_signal_used(nvq);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len,
					vq->iov, in);
		total_len += vhost_len;
	} while (likely(!vhost_exceeds_weight(vq, ++recv_pkts, total_len)));

	if (unlikely(busyloop_intr))
		vhost_poll_queue(&vq->poll);
	else if (!sock_len)
		vhost_net_enable_vq(net, vq);
out:
	vhost_net_signal_used(nvq);
	mutex_unlock(&vq->mutex);
}

static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}

static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	void **queue;
	struct xdp_buff *xdp;
	int i;

	n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!n)
		return -ENOMEM;
	vqs = kmalloc_array(VHOST_NET_VQ_MAX, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		kvfree(n);
		return -ENOMEM;
	}

	queue = kmalloc_array(VHOST_NET_BATCH, sizeof(void *),
			      GFP_KERNEL);
	if (!queue) {
		kfree(vqs);
		kvfree(n);
		return -ENOMEM;
	}
	n->vqs[VHOST_NET_VQ_RX].rxq.queue = queue;

	xdp = kmalloc_array(VHOST_NET_BATCH, sizeof(*xdp), GFP_KERNEL);
	if (!xdp) {
		kfree(vqs);
		kvfree(n);
		kfree(queue);
		return -ENOMEM;
	}
	n->vqs[VHOST_NET_VQ_TX].xdp = xdp;

	dev = &n->dev;
	vqs[VHOST_NET_VQ_TX] = &n->vqs[VHOST_NET_VQ_TX].vq;
	vqs[VHOST_NET_VQ_RX] = &n->vqs[VHOST_NET_VQ_RX].vq;
	n->vqs[VHOST_NET_VQ_TX].vq.handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].vq.handle_kick = handle_rx_kick;
	for (i = 0; i < VHOST_NET_VQ_MAX; i++) {
		n->vqs[i].ubufs = NULL;
		n->vqs[i].ubuf_info = NULL;
		n->vqs[i].upend_idx = 0;
		n->vqs[i].done_idx = 0;
		n->vqs[i].batched_xdp = 0;
		n->vqs[i].vhost_hlen = 0;
		n->vqs[i].sock_hlen = 0;
		n->vqs[i].rx_ring = NULL;
		vhost_net_buf_init(&n->vqs[i].rxq);
	}
	vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX,
		       UIO_MAXIOV + VHOST_NET_BATCH,
		       VHOST_NET_PKT_WEIGHT, VHOST_NET_WEIGHT, true,
		       NULL);

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev,
			vqs[VHOST_NET_VQ_TX]);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev,
			vqs[VHOST_NET_VQ_RX]);

	f->private_data = n;
	page_frag_cache_init(&n->pf_cache);

	return 0;
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;
	struct vhost_net_virtqueue *nvq =
		container_of(vq, struct vhost_net_virtqueue, vq);

	mutex_lock(&vq->mutex);
	sock = vhost_vq_get_backend(vq);
	vhost_net_disable_vq(n, vq);
	vhost_vq_set_backend(vq, NULL);
	vhost_net_buf_unproduce(nvq);
	nvq->rx_ring = NULL;
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_TX].vq);
	*rx_sock = vhost_net_stop_vq(n, &n->vqs[VHOST_NET_VQ_RX].vq);
}

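/* Flush pending work. Setting tx_flush makes vhost_net_tx_select_zcopy()
 * refuse new zerocopy DMAs while we wait for the outstanding ones, and the
 * ubuf refcount is reset to 1 ("no outstanding ubufs") afterwards.
 */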
static void vhost_net_flush(struct vhost_net *n)
{
	vhost_dev_flush(&n->dev);
	if (n->vqs[VHOST_NET_VQ_TX].ubufs) {
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = true;
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		/* Wait for all lower device DMAs done. */
		vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
		mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
		n->tx_flush = false;
		atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
		mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
	}
}

static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev);
	vhost_net_vq_reset(n);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	/* Make sure no callbacks are outstanding */
	synchronize_rcu();
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n->vqs[VHOST_NET_VQ_RX].rxq.queue);
	kfree(n->vqs[VHOST_NET_VQ_TX].xdp);
	kfree(n->dev.vqs);
	page_frag_cache_drain(&n->pf_cache);
	kvfree(n);
	return 0;
}

static struct socket *get_raw_socket(int fd)
{
	int r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	if (sock->sk->sk_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	sockfd_put(sock);
	return ERR_PTR(r);
}

static struct ptr_ring *get_tap_ptr_ring(struct file *file)
{
	struct ptr_ring *ring;

	ring = tun_get_tx_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = tap_get_ptr_ring(file);
	if (!IS_ERR(ring))
		goto out;
	ring = NULL;
out:
	return ring;
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = tap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}

static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	struct vhost_net_virtqueue *nvq;
	struct vhost_net_ubuf_ref *ubufs, *oldubufs = NULL;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = &n->vqs[index].vq;
	nvq = &n->vqs[index];
	mutex_lock(&vq->mutex);

	if (fd == -1)
		vhost_clear_msg(&n->dev);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = vhost_vq_get_backend(vq);
	if (sock != oldsock) {
		ubufs = vhost_net_ubuf_alloc(vq,
					     sock && vhost_sock_zcopy(sock));
		if (IS_ERR(ubufs)) {
			r = PTR_ERR(ubufs);
			goto err_ubufs;
		}

		vhost_net_disable_vq(n, vq);
		vhost_vq_set_backend(vq, sock);
		vhost_net_buf_unproduce(nvq);
		r = vhost_vq_init_access(vq);
		if (r)
			goto err_used;
		r = vhost_net_enable_vq(n, vq);
		if (r)
			goto err_used;
		if (index == VHOST_NET_VQ_RX) {
			if (sock)
				nvq->rx_ring = get_tap_ptr_ring(sock->file);
			else
				nvq->rx_ring = NULL;
		}

		oldubufs = nvq->ubufs;
		nvq->ubufs = ubufs;

		n->tx_packets = 0;
		n->tx_zcopy_err = 0;
		n->tx_flush = false;
	}

	mutex_unlock(&vq->mutex);

	if (oldubufs) {
		vhost_net_ubuf_put_wait_and_free(oldubufs);
		mutex_lock(&vq->mutex);
		vhost_zerocopy_signal_used(n, vq);
		mutex_unlock(&vq->mutex);
	}

	if (oldsock) {
		vhost_dev_flush(&n->dev);
		sockfd_put(oldsock);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_used:
	vhost_vq_set_backend(vq, oldsock);
	vhost_net_enable_vq(n, vq);
	if (ubufs)
		vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
	if (sock)
		sockfd_put(sock);
err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;
	struct vhost_iotlb *umem;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	umem = vhost_dev_reset_owner_prepare();
	if (!umem) {
		err = -ENOMEM;
		goto done;
	}
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_reset_owner(&n->dev, umem);
	vhost_net_vq_reset(n);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		sockfd_put(tx_sock);
	if (rx_sock)
		sockfd_put(rx_sock);
	return err;
}

static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & ((1ULL << VIRTIO_NET_F_MRG_RXBUF) |
			       (1ULL << VIRTIO_F_VERSION_1))) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev))
		goto out_unlock;

	if ((features & (1ULL << VIRTIO_F_ACCESS_PLATFORM))) {
		if (vhost_init_device_iotlb(&n->dev))
			goto out_unlock;
	}

	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].vq.mutex);
		n->vqs[i].vq.acked_features = features;
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].vq.mutex);
	}
	mutex_unlock(&n->dev.mutex);
	return 0;

out_unlock:
	mutex_unlock(&n->dev.mutex);
	return -EFAULT;
}

static long vhost_net_set_owner(struct vhost_net *n)
{
	int r;

	mutex_lock(&n->dev.mutex);
	if (vhost_dev_has_owner(&n->dev)) {
		r = -EBUSY;
		goto out;
	}
	r = vhost_net_set_ubuf_info(n);
	if (r)
		goto out;
	r = vhost_dev_set_owner(&n->dev);
	if (r)
		vhost_net_clear_ubuf_info(n);
	vhost_net_flush(n);
out:
	mutex_unlock(&n->dev.mutex);
	return r;
}

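/* Typical userspace bring-up of this device, as a rough sketch (ring and
 * eventfd setup via the generic VHOST_SET_VRING_* ioctls is elided, and
 * error handling is omitted):
 *
 *	int vhost = open("/dev/vhost-net", O_RDWR);
 *	ioctl(vhost, VHOST_SET_OWNER, NULL);
 *	ioctl(vhost, VHOST_SET_FEATURES, &features);
 *	// ... VHOST_SET_VRING_NUM/ADDR/BASE/KICK/CALL for each vq ...
 *	struct vhost_vring_file backend = { .index = VHOST_NET_VQ_RX,
 *					    .fd = tap_fd };
 *	ioctl(vhost, VHOST_NET_SET_BACKEND, &backend);
 *
 * Passing fd == -1 in VHOST_NET_SET_BACKEND detaches the backend again.
 */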
static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof(backend)))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_NET_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_NET_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_NET_BACKEND_FEATURES;
		if (copy_to_user(featurep, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_BACKEND_FEATURES:
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~VHOST_NET_BACKEND_FEATURES)
			return -EOPNOTSUPP;
		vhost_set_backend_features(&n->dev, features);
		return 0;
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	case VHOST_SET_OWNER:
		return vhost_net_set_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		else
			vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

static ssize_t vhost_net_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;
	int noblock = file->f_flags & O_NONBLOCK;

	return vhost_chr_read_iter(dev, to, noblock);
}

static ssize_t vhost_net_chr_write_iter(struct kiocb *iocb,
					struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_write_iter(dev, from);
}

static __poll_t vhost_net_chr_poll(struct file *file, poll_table *wait)
{
	struct vhost_net *n = file->private_data;
	struct vhost_dev *dev = &n->dev;

	return vhost_chr_poll(file, dev, wait);
}

static const struct file_operations vhost_net_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_net_release,
	.read_iter      = vhost_net_chr_read_iter,
	.write_iter     = vhost_net_chr_write_iter,
	.poll           = vhost_net_chr_poll,
	.unlocked_ioctl = vhost_net_ioctl,
	.compat_ioctl   = compat_ptr_ioctl,
	.open           = vhost_net_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_net_misc = {
	.minor = VHOST_NET_MINOR,
	.name = "vhost-net",
	.fops = &vhost_net_fops,
};

static int __init vhost_net_init(void)
{
	if (experimental_zcopytx)
		vhost_net_enable_zcopy(VHOST_NET_VQ_TX);
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void __exit vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");