xref: /linux/drivers/net/virtio_net.c (revision 6f0310a126f1a46cac366327751bb7eb8941bdde)
1 /* A network driver using virtio.
2  *
3  * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program; if not, see <http://www.gnu.org/licenses/>.
17  */
18 //#define DEBUG
19 #include <linux/netdevice.h>
20 #include <linux/etherdevice.h>
21 #include <linux/ethtool.h>
22 #include <linux/module.h>
23 #include <linux/virtio.h>
24 #include <linux/virtio_net.h>
25 #include <linux/bpf.h>
26 #include <linux/scatterlist.h>
27 #include <linux/if_vlan.h>
28 #include <linux/slab.h>
29 #include <linux/cpu.h>
30 #include <linux/average.h>
31 #include <net/busy_poll.h>
32 
33 static int napi_weight = NAPI_POLL_WEIGHT;
34 module_param(napi_weight, int, 0444);
35 
36 static bool csum = true, gso = true;
37 module_param(csum, bool, 0444);
38 module_param(gso, bool, 0444);
39 
40 /* FIXME: MTU in config. */
41 #define GOOD_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
42 #define GOOD_COPY_LEN	128
43 
44 /* RX packet size EWMA. The average packet size is used to determine the packet
45  * buffer size when refilling RX rings. As the entire RX ring may be refilled
46  * at once, the weight is chosen so that the EWMA will be insensitive to short-
47  * term, transient changes in packet size.
48  */
49 DECLARE_EWMA(pkt_len, 1, 64)
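/* Note: DECLARE_EWMA() generates struct ewma_pkt_len plus the
 * ewma_pkt_len_add()/ewma_pkt_len_read() helpers used later in this file.
 * With a weight parameter of 64, each new sample only moves the average by
 * roughly 1/64 of the difference, which is what keeps the estimate
 * insensitive to the short-term changes mentioned above.
 */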
50 
51 /* With mergeable buffers we align buffer address and use the low bits to
52  * encode its true size. Buffer size is up to 1 page so we need to align to
53  * square root of page size to ensure we reserve enough bits to encode the true
54  * size.
55  */
56 #define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
57 
58 /* Minimum alignment for mergeable packet buffers. */
59 #define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
60 				   1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
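/* Worked example (illustrative values only): with 4 KiB pages the shift above
 * is (12 + 1) / 2 = 6, so the minimum alignment is 64 bytes and, with 64-byte
 * L1 cache lines, MERGEABLE_BUFFER_ALIGN is 64.  mergeable_buf_to_ctx() below
 * then stores (truesize / 64 - 1) in the low 6 bits of the (64-byte aligned)
 * buffer address, and mergeable_ctx_to_buf_truesize()/_address() decode it.
 */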
61 
62 #define VIRTNET_DRIVER_VERSION "1.0.0"
63 
64 struct virtnet_stats {
65 	struct u64_stats_sync tx_syncp;
66 	struct u64_stats_sync rx_syncp;
67 	u64 tx_bytes;
68 	u64 tx_packets;
69 
70 	u64 rx_bytes;
71 	u64 rx_packets;
72 };
73 
74 /* Internal representation of a send virtqueue */
75 struct send_queue {
76 	/* Virtqueue associated with this send_queue */
77 	struct virtqueue *vq;
78 
79 	/* TX: fragments + linear part + virtio header */
80 	struct scatterlist sg[MAX_SKB_FRAGS + 2];
81 
82 	/* Name of the send queue: output.$index */
83 	char name[40];
84 };
85 
86 /* Internal representation of a receive virtqueue */
87 struct receive_queue {
88 	/* Virtqueue associated with this receive_queue */
89 	struct virtqueue *vq;
90 
91 	struct napi_struct napi;
92 
93 	struct bpf_prog __rcu *xdp_prog;
94 
95 	/* Chain pages by the private ptr. */
96 	struct page *pages;
97 
98 	/* Average packet length for mergeable receive buffers. */
99 	struct ewma_pkt_len mrg_avg_pkt_len;
100 
101 	/* Page frag for packet buffer allocation. */
102 	struct page_frag alloc_frag;
103 
104 	/* RX: fragments + linear part + virtio header */
105 	struct scatterlist sg[MAX_SKB_FRAGS + 2];
106 
107 	/* Name of this receive queue: input.$index */
108 	char name[40];
109 };
110 
111 struct virtnet_info {
112 	struct virtio_device *vdev;
113 	struct virtqueue *cvq;
114 	struct net_device *dev;
115 	struct send_queue *sq;
116 	struct receive_queue *rq;
117 	unsigned int status;
118 
119 	/* Max # of queue pairs supported by the device */
120 	u16 max_queue_pairs;
121 
122 	/* # of queue pairs currently used by the driver */
123 	u16 curr_queue_pairs;
124 
125 	/* # of XDP queue pairs currently used by the driver */
126 	u16 xdp_queue_pairs;
127 
128 	/* I like... big packets and I cannot lie! */
129 	bool big_packets;
130 
131 	/* Host will merge rx buffers for big packets (shake it! shake it!) */
132 	bool mergeable_rx_bufs;
133 
134 	/* Has control virtqueue */
135 	bool has_cvq;
136 
137 	/* Host can handle any s/g split between our header and packet data */
138 	bool any_header_sg;
139 
140 	/* Packet virtio header size */
141 	u8 hdr_len;
142 
143 	/* Active statistics */
144 	struct virtnet_stats __percpu *stats;
145 
146 	/* Work struct for refilling if we run low on memory. */
147 	struct delayed_work refill;
148 
149 	/* Work struct for config space updates */
150 	struct work_struct config_work;
151 
152 	/* Is the affinity hint set for virtqueues? */
153 	bool affinity_hint_set;
154 
155 	/* CPU hotplug instances for online & dead */
156 	struct hlist_node node;
157 	struct hlist_node node_dead;
158 
159 	/* Control VQ buffers: protected by the rtnl lock */
160 	struct virtio_net_ctrl_hdr ctrl_hdr;
161 	virtio_net_ctrl_ack ctrl_status;
162 	struct virtio_net_ctrl_mq ctrl_mq;
163 	u8 ctrl_promisc;
164 	u8 ctrl_allmulti;
165 	u16 ctrl_vid;
166 
167 	/* Ethtool settings */
168 	u8 duplex;
169 	u32 speed;
170 };
171 
172 struct padded_vnet_hdr {
173 	struct virtio_net_hdr_mrg_rxbuf hdr;
174 	/*
175 	 * hdr is in a separate sg buffer, and data sg buffer shares same page
176 	 * with this header sg. This padding makes next sg 16 byte aligned
177 	 * after the header.
178 	 */
179 	char padding[4];
180 };
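/* Illustrative arithmetic, assuming the usual header layout: the mergeable
 * header is 12 bytes (a 10-byte struct virtio_net_hdr plus the 16-bit
 * num_buffers field), so the 4 bytes of padding above bring the data sg that
 * follows the header up to a 16-byte boundary.
 */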
181 
182 /* Converting between virtqueue no. and kernel tx/rx queue no.
183  * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq
184  */
185 static int vq2txq(struct virtqueue *vq)
186 {
187 	return (vq->index - 1) / 2;
188 }
189 
190 static int txq2vq(int txq)
191 {
192 	return txq * 2 + 1;
193 }
194 
195 static int vq2rxq(struct virtqueue *vq)
196 {
197 	return vq->index / 2;
198 }
199 
200 static int rxq2vq(int rxq)
201 {
202 	return rxq * 2;
203 }
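/* For example, with two queue pairs the virtqueues are laid out as
 * 0:rx0 1:tx0 2:rx1 3:tx1 4:cvq, so rxq2vq(1) == 2, txq2vq(1) == 3, and
 * vq2rxq()/vq2txq() invert those mappings.
 */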
204 
205 static inline struct virtio_net_hdr_mrg_rxbuf *skb_vnet_hdr(struct sk_buff *skb)
206 {
207 	return (struct virtio_net_hdr_mrg_rxbuf *)skb->cb;
208 }
209 
210 /*
211  * private is used to chain pages for big packets; put the whole
212  * most recently used list at the beginning for reuse
213  */
214 static void give_pages(struct receive_queue *rq, struct page *page)
215 {
216 	struct page *end;
217 
218 	/* Find end of list, sew whole thing into rq->pages. */
219 	for (end = page; end->private; end = (struct page *)end->private);
220 	end->private = (unsigned long)rq->pages;
221 	rq->pages = page;
222 }
223 
224 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask)
225 {
226 	struct page *p = rq->pages;
227 
228 	if (p) {
229 		rq->pages = (struct page *)p->private;
230 		/* clear private here, it is used to chain pages */
231 		p->private = 0;
232 	} else
233 		p = alloc_page(gfp_mask);
234 	return p;
235 }
236 
237 static void skb_xmit_done(struct virtqueue *vq)
238 {
239 	struct virtnet_info *vi = vq->vdev->priv;
240 
241 	/* Suppress further interrupts. */
242 	virtqueue_disable_cb(vq);
243 
244 	/* We were probably waiting for more output buffers. */
245 	netif_wake_subqueue(vi->dev, vq2txq(vq));
246 }
247 
248 static unsigned int mergeable_ctx_to_buf_truesize(unsigned long mrg_ctx)
249 {
250 	unsigned int truesize = mrg_ctx & (MERGEABLE_BUFFER_ALIGN - 1);
251 	return (truesize + 1) * MERGEABLE_BUFFER_ALIGN;
252 }
253 
254 static void *mergeable_ctx_to_buf_address(unsigned long mrg_ctx)
255 {
256 	return (void *)(mrg_ctx & -MERGEABLE_BUFFER_ALIGN);
257 
258 }
259 
260 static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
261 {
262 	unsigned int size = truesize / MERGEABLE_BUFFER_ALIGN;
263 	return (unsigned long)buf | (size - 1);
264 }
265 
266 /* Called from bottom half context */
267 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
268 				   struct receive_queue *rq,
269 				   struct page *page, unsigned int offset,
270 				   unsigned int len, unsigned int truesize)
271 {
272 	struct sk_buff *skb;
273 	struct virtio_net_hdr_mrg_rxbuf *hdr;
274 	unsigned int copy, hdr_len, hdr_padded_len;
275 	char *p;
276 
277 	p = page_address(page) + offset;
278 
279 	/* copy small packet so we can reuse these pages for small data */
280 	skb = napi_alloc_skb(&rq->napi, GOOD_COPY_LEN);
281 	if (unlikely(!skb))
282 		return NULL;
283 
284 	hdr = skb_vnet_hdr(skb);
285 
286 	hdr_len = vi->hdr_len;
287 	if (vi->mergeable_rx_bufs)
288 		hdr_padded_len = sizeof(*hdr);
289 	else
290 		hdr_padded_len = sizeof(struct padded_vnet_hdr);
291 
292 	memcpy(hdr, p, hdr_len);
293 
294 	len -= hdr_len;
295 	offset += hdr_padded_len;
296 	p += hdr_padded_len;
297 
298 	copy = len;
299 	if (copy > skb_tailroom(skb))
300 		copy = skb_tailroom(skb);
301 	memcpy(skb_put(skb, copy), p, copy);
302 
303 	len -= copy;
304 	offset += copy;
305 
306 	if (vi->mergeable_rx_bufs) {
307 		if (len)
308 			skb_add_rx_frag(skb, 0, page, offset, len, truesize);
309 		else
310 			put_page(page);
311 		return skb;
312 	}
313 
314 	/*
315 	 * Verify that we can indeed put this data into an skb.
316 	 * This is here to handle cases when the device erroneously
317 	 * hands us more data than is possible, which usually
318 	 * indicates a broken device.
319 	 */
320 	if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
321 		net_dbg_ratelimited("%s: too much data\n", skb->dev->name);
322 		dev_kfree_skb(skb);
323 		return NULL;
324 	}
325 	BUG_ON(offset >= PAGE_SIZE);
326 	while (len) {
327 		unsigned int frag_size = min((unsigned)PAGE_SIZE - offset, len);
328 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
329 				frag_size, truesize);
330 		len -= frag_size;
331 		page = (struct page *)page->private;
332 		offset = 0;
333 	}
334 
335 	if (page)
336 		give_pages(rq, page);
337 
338 	return skb;
339 }
340 
341 static void virtnet_xdp_xmit(struct virtnet_info *vi,
342 			     struct receive_queue *rq,
343 			     struct send_queue *sq,
344 			     struct xdp_buff *xdp,
345 			     void *data)
346 {
347 	struct virtio_net_hdr_mrg_rxbuf *hdr;
348 	unsigned int num_sg, len;
349 	void *xdp_sent;
350 	int err;
351 
352 	/* Free up any pending old buffers before queueing new ones. */
353 	while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
354 		if (vi->mergeable_rx_bufs) {
355 			struct page *sent_page = virt_to_head_page(xdp_sent);
356 
357 			put_page(sent_page);
358 		} else { /* small buffer */
359 			struct sk_buff *skb = xdp_sent;
360 
361 			kfree_skb(skb);
362 		}
363 	}
364 
365 	if (vi->mergeable_rx_bufs) {
366 		/* Zero header and leave csum up to XDP layers */
367 		hdr = xdp->data;
368 		memset(hdr, 0, vi->hdr_len);
369 
370 		num_sg = 1;
371 		sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
372 	} else { /* small buffer */
373 		struct sk_buff *skb = data;
374 
375 		/* Zero header and leave csum up to XDP layers */
376 		hdr = skb_vnet_hdr(skb);
377 		memset(hdr, 0, vi->hdr_len);
378 
379 		num_sg = 2;
380 		sg_init_table(sq->sg, 2);
381 		sg_set_buf(sq->sg, hdr, vi->hdr_len);
382 		skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
383 	}
384 	err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
385 				   data, GFP_ATOMIC);
386 	if (unlikely(err)) {
387 		if (vi->mergeable_rx_bufs) {
388 			struct page *page = virt_to_head_page(xdp->data);
389 
390 			put_page(page);
391 		} else /* small buffer */
392 			kfree_skb(data);
393 		return; /* On error, abort to avoid an unnecessary kick */
394 	}
395 
396 	virtqueue_kick(sq->vq);
397 }
398 
399 static u32 do_xdp_prog(struct virtnet_info *vi,
400 		       struct receive_queue *rq,
401 		       struct bpf_prog *xdp_prog,
402 		       void *data, int len)
403 {
404 	int hdr_padded_len;
405 	struct xdp_buff xdp;
406 	void *buf;
407 	unsigned int qp;
408 	u32 act;
409 
410 	if (vi->mergeable_rx_bufs) {
411 		hdr_padded_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
412 		xdp.data = data + hdr_padded_len;
413 		xdp.data_end = xdp.data + (len - vi->hdr_len);
414 		buf = data;
415 	} else { /* small buffers */
416 		struct sk_buff *skb = data;
417 
418 		xdp.data = skb->data;
419 		xdp.data_end = xdp.data + len;
420 		buf = skb->data;
421 	}
422 
423 	act = bpf_prog_run_xdp(xdp_prog, &xdp);
424 	switch (act) {
425 	case XDP_PASS:
426 		return XDP_PASS;
427 	case XDP_TX:
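		/* Each CPU gets its own XDP TX queue: virtnet_xdp_set()
		 * reserves nr_cpu_ids extra queue pairs when a program is
		 * attached, so indexing by smp_processor_id() stays within
		 * that block and avoids contention between CPUs.
		 */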
428 		qp = vi->curr_queue_pairs -
429 			vi->xdp_queue_pairs +
430 			smp_processor_id();
431 		xdp.data = buf;
432 		virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, data);
433 		return XDP_TX;
434 	default:
435 		bpf_warn_invalid_xdp_action(act);
436 	case XDP_ABORTED:
437 	case XDP_DROP:
438 		return XDP_DROP;
439 	}
440 }
441 
442 static struct sk_buff *receive_small(struct net_device *dev,
443 				     struct virtnet_info *vi,
444 				     struct receive_queue *rq,
445 				     void *buf, unsigned int len)
446 {
447 	struct sk_buff *skb = buf;
448 	struct bpf_prog *xdp_prog;
449 
450 	len -= vi->hdr_len;
451 	skb_trim(skb, len);
452 
453 	rcu_read_lock();
454 	xdp_prog = rcu_dereference(rq->xdp_prog);
455 	if (xdp_prog) {
456 		struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
457 		u32 act;
458 
459 		if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
460 			goto err_xdp;
461 		act = do_xdp_prog(vi, rq, xdp_prog, skb, len);
462 		switch (act) {
463 		case XDP_PASS:
464 			break;
465 		case XDP_TX:
466 			rcu_read_unlock();
467 			goto xdp_xmit;
468 		case XDP_DROP:
469 		default:
470 			goto err_xdp;
471 		}
472 	}
473 	rcu_read_unlock();
474 
475 	return skb;
476 
477 err_xdp:
478 	rcu_read_unlock();
479 	dev->stats.rx_dropped++;
480 	kfree_skb(skb);
481 xdp_xmit:
482 	return NULL;
483 }
484 
485 static struct sk_buff *receive_big(struct net_device *dev,
486 				   struct virtnet_info *vi,
487 				   struct receive_queue *rq,
488 				   void *buf,
489 				   unsigned int len)
490 {
491 	struct page *page = buf;
492 	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
493 
494 	if (unlikely(!skb))
495 		goto err;
496 
497 	return skb;
498 
499 err:
500 	dev->stats.rx_dropped++;
501 	give_pages(rq, page);
502 	return NULL;
503 }
504 
505 /* The conditions to enable XDP should preclude the underlying device from
506  * sending packets across multiple buffers (num_buf > 1). However, per the
507  * spec this does not appear to be illegal, merely against convention.
508  * So, to avoid making the system unresponsive, the packets are linearized
509  * into a single page and the XDP program is run on that. This will be
510  * extremely slow, so we warn the user to fix it as soon as possible. Fixing
511  * it may require investigating the underlying hardware to determine why
512  * multiple buffers are being received, or simply loading the XDP program in
513  * the ingress stack after the skb is built, because there is no advantage
514  * to running it here anymore.
515  */
516 static struct page *xdp_linearize_page(struct receive_queue *rq,
517 				       u16 *num_buf,
518 				       struct page *p,
519 				       int offset,
520 				       unsigned int *len)
521 {
522 	struct page *page = alloc_page(GFP_ATOMIC);
523 	unsigned int page_off = 0;
524 
525 	if (!page)
526 		return NULL;
527 
528 	memcpy(page_address(page) + page_off, page_address(p) + offset, *len);
529 	page_off += *len;
530 
531 	while (--*num_buf) {
532 		unsigned int buflen;
533 		unsigned long ctx;
534 		void *buf;
535 		int off;
536 
537 		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &buflen);
538 		if (unlikely(!ctx))
539 			goto err_buf;
540 
541 		buf = mergeable_ctx_to_buf_address(ctx);
542 		p = virt_to_head_page(buf);
543 		off = buf - page_address(p);
544 
545 		/* guard against a misconfigured or uncooperative backend that
546 		 * is sending packets larger than the MTU.
547 		 */
548 		if ((page_off + buflen) > PAGE_SIZE) {
549 			put_page(p);
550 			goto err_buf;
551 		}
552 
553 		memcpy(page_address(page) + page_off,
554 		       page_address(p) + off, buflen);
555 		page_off += buflen;
556 		put_page(p);
557 	}
558 
559 	*len = page_off;
560 	return page;
561 err_buf:
562 	__free_pages(page, 0);
563 	return NULL;
564 }
565 
566 static struct sk_buff *receive_mergeable(struct net_device *dev,
567 					 struct virtnet_info *vi,
568 					 struct receive_queue *rq,
569 					 unsigned long ctx,
570 					 unsigned int len)
571 {
572 	void *buf = mergeable_ctx_to_buf_address(ctx);
573 	struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
574 	u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
575 	struct page *page = virt_to_head_page(buf);
576 	int offset = buf - page_address(page);
577 	struct sk_buff *head_skb, *curr_skb;
578 	struct bpf_prog *xdp_prog;
579 	unsigned int truesize;
580 
581 	head_skb = NULL;
582 
583 	rcu_read_lock();
584 	xdp_prog = rcu_dereference(rq->xdp_prog);
585 	if (xdp_prog) {
586 		struct page *xdp_page;
587 		u32 act;
588 
589 		/* This happens when rx buffer size is underestimated */
590 		if (unlikely(num_buf > 1)) {
591 			/* linearize data for XDP */
592 			xdp_page = xdp_linearize_page(rq, &num_buf,
593 						      page, offset, &len);
594 			if (!xdp_page)
595 				goto err_xdp;
596 			offset = 0;
597 		} else {
598 			xdp_page = page;
599 		}
600 
601 		/* Transient failure which in theory could occur if
602 		 * in-flight packets from before XDP was enabled reach
603 		 * the receive path after XDP is loaded. In practice I
604 		 * was not able to create this condition.
605 		 */
606 		if (unlikely(hdr->hdr.gso_type))
607 			goto err_xdp;
608 
609 		act = do_xdp_prog(vi, rq, xdp_prog,
610 				  page_address(xdp_page) + offset, len);
611 		switch (act) {
612 		case XDP_PASS:
613 			/* We can only create skb based on xdp_page. */
614 			if (unlikely(xdp_page != page)) {
615 				rcu_read_unlock();
616 				put_page(page);
617 				head_skb = page_to_skb(vi, rq, xdp_page,
618 						       0, len, PAGE_SIZE);
619 				ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
620 				return head_skb;
621 			}
622 			break;
623 		case XDP_TX:
624 			ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
625 			if (unlikely(xdp_page != page))
626 				goto err_xdp;
627 			rcu_read_unlock();
628 			goto xdp_xmit;
629 		case XDP_DROP:
630 		default:
631 			if (unlikely(xdp_page != page))
632 				__free_pages(xdp_page, 0);
633 			ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
634 			goto err_xdp;
635 		}
636 	}
637 	rcu_read_unlock();
638 
639 	truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
640 	head_skb = page_to_skb(vi, rq, page, offset, len, truesize);
641 	curr_skb = head_skb;
642 
643 	if (unlikely(!curr_skb))
644 		goto err_skb;
645 	while (--num_buf) {
646 		int num_skb_frags;
647 
648 		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
649 		if (unlikely(!ctx)) {
650 			pr_debug("%s: rx error: %d buffers out of %d missing\n",
651 				 dev->name, num_buf,
652 				 virtio16_to_cpu(vi->vdev,
653 						 hdr->num_buffers));
654 			dev->stats.rx_length_errors++;
655 			goto err_buf;
656 		}
657 
658 		buf = mergeable_ctx_to_buf_address(ctx);
659 		page = virt_to_head_page(buf);
660 
661 		num_skb_frags = skb_shinfo(curr_skb)->nr_frags;
662 		if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) {
663 			struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC);
664 
665 			if (unlikely(!nskb))
666 				goto err_skb;
667 			if (curr_skb == head_skb)
668 				skb_shinfo(curr_skb)->frag_list = nskb;
669 			else
670 				curr_skb->next = nskb;
671 			curr_skb = nskb;
672 			head_skb->truesize += nskb->truesize;
673 			num_skb_frags = 0;
674 		}
675 		truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
676 		if (curr_skb != head_skb) {
677 			head_skb->data_len += len;
678 			head_skb->len += len;
679 			head_skb->truesize += truesize;
680 		}
681 		offset = buf - page_address(page);
682 		if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
683 			put_page(page);
684 			skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
685 					     len, truesize);
686 		} else {
687 			skb_add_rx_frag(curr_skb, num_skb_frags, page,
688 					offset, len, truesize);
689 		}
690 	}
691 
692 	ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len);
693 	return head_skb;
694 
695 err_xdp:
696 	rcu_read_unlock();
697 err_skb:
698 	put_page(page);
699 	while (--num_buf) {
700 		ctx = (unsigned long)virtqueue_get_buf(rq->vq, &len);
701 		if (unlikely(!ctx)) {
702 			pr_debug("%s: rx error: %d buffers missing\n",
703 				 dev->name, num_buf);
704 			dev->stats.rx_length_errors++;
705 			break;
706 		}
707 		page = virt_to_head_page(mergeable_ctx_to_buf_address(ctx));
708 		put_page(page);
709 	}
710 err_buf:
711 	dev->stats.rx_dropped++;
712 	dev_kfree_skb(head_skb);
713 xdp_xmit:
714 	return NULL;
715 }
716 
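/* receive_buf() dispatches to the mergeable, big-packet or small-packet
 * receive path; note that the type behind @buf depends on the RX mode
 * (a ctx-encoded buffer address, a page chain, or an skb, respectively).
 */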
717 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
718 			void *buf, unsigned int len)
719 {
720 	struct net_device *dev = vi->dev;
721 	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
722 	struct sk_buff *skb;
723 	struct virtio_net_hdr_mrg_rxbuf *hdr;
724 
725 	if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
726 		pr_debug("%s: short packet %i\n", dev->name, len);
727 		dev->stats.rx_length_errors++;
728 		if (vi->mergeable_rx_bufs) {
729 			unsigned long ctx = (unsigned long)buf;
730 			void *base = mergeable_ctx_to_buf_address(ctx);
731 			put_page(virt_to_head_page(base));
732 		} else if (vi->big_packets) {
733 			give_pages(rq, buf);
734 		} else {
735 			dev_kfree_skb(buf);
736 		}
737 		return;
738 	}
739 
740 	if (vi->mergeable_rx_bufs)
741 		skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len);
742 	else if (vi->big_packets)
743 		skb = receive_big(dev, vi, rq, buf, len);
744 	else
745 		skb = receive_small(dev, vi, rq, buf, len);
746 
747 	if (unlikely(!skb))
748 		return;
749 
750 	hdr = skb_vnet_hdr(skb);
751 
752 	u64_stats_update_begin(&stats->rx_syncp);
753 	stats->rx_bytes += skb->len;
754 	stats->rx_packets++;
755 	u64_stats_update_end(&stats->rx_syncp);
756 
757 	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID)
758 		skb->ip_summed = CHECKSUM_UNNECESSARY;
759 
760 	if (virtio_net_hdr_to_skb(skb, &hdr->hdr,
761 				  virtio_is_little_endian(vi->vdev))) {
762 		net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n",
763 				     dev->name, hdr->hdr.gso_type,
764 				     hdr->hdr.gso_size);
765 		goto frame_err;
766 	}
767 
768 	skb->protocol = eth_type_trans(skb, dev);
769 	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
770 		 ntohs(skb->protocol), skb->len, skb->pkt_type);
771 
772 	napi_gro_receive(&rq->napi, skb);
773 	return;
774 
775 frame_err:
776 	dev->stats.rx_frame_errors++;
777 	dev_kfree_skb(skb);
778 }
779 
780 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
781 			     gfp_t gfp)
782 {
783 	struct sk_buff *skb;
784 	struct virtio_net_hdr_mrg_rxbuf *hdr;
785 	int err;
786 
787 	skb = __netdev_alloc_skb_ip_align(vi->dev, GOOD_PACKET_LEN, gfp);
788 	if (unlikely(!skb))
789 		return -ENOMEM;
790 
791 	skb_put(skb, GOOD_PACKET_LEN);
792 
793 	hdr = skb_vnet_hdr(skb);
794 	sg_init_table(rq->sg, 2);
795 	sg_set_buf(rq->sg, hdr, vi->hdr_len);
796 	skb_to_sgvec(skb, rq->sg + 1, 0, skb->len);
797 
798 	err = virtqueue_add_inbuf(rq->vq, rq->sg, 2, skb, gfp);
799 	if (err < 0)
800 		dev_kfree_skb(skb);
801 
802 	return err;
803 }
804 
805 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
806 			   gfp_t gfp)
807 {
808 	struct page *first, *list = NULL;
809 	char *p;
810 	int i, err, offset;
811 
812 	sg_init_table(rq->sg, MAX_SKB_FRAGS + 2);
813 
814 	/* page in rq->sg[MAX_SKB_FRAGS + 1] is list tail */
815 	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
816 		first = get_a_page(rq, gfp);
817 		if (!first) {
818 			if (list)
819 				give_pages(rq, list);
820 			return -ENOMEM;
821 		}
822 		sg_set_buf(&rq->sg[i], page_address(first), PAGE_SIZE);
823 
824 		/* chain new page in list head to match sg */
825 		first->private = (unsigned long)list;
826 		list = first;
827 	}
828 
829 	first = get_a_page(rq, gfp);
830 	if (!first) {
831 		give_pages(rq, list);
832 		return -ENOMEM;
833 	}
834 	p = page_address(first);
835 
836 	/* rq->sg[0], rq->sg[1] share the same page */
837 	/* a separate rq->sg[0] for the header - required in case !any_header_sg */
838 	sg_set_buf(&rq->sg[0], p, vi->hdr_len);
839 
840 	/* rq->sg[1] for data packet, from offset */
841 	offset = sizeof(struct padded_vnet_hdr);
842 	sg_set_buf(&rq->sg[1], p + offset, PAGE_SIZE - offset);
843 
844 	/* chain first in list head */
845 	first->private = (unsigned long)list;
846 	err = virtqueue_add_inbuf(rq->vq, rq->sg, MAX_SKB_FRAGS + 2,
847 				  first, gfp);
848 	if (err < 0)
849 		give_pages(rq, first);
850 
851 	return err;
852 }
853 
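/* Size the next mergeable RX buffer from the EWMA of recent packet lengths:
 * the average is clamped to [GOOD_PACKET_LEN, PAGE_SIZE - hdr_len], the
 * header size is added, and the result is rounded up to the buffer alignment.
 * Rough example, assuming a 64-byte MERGEABLE_BUFFER_ALIGN: an average near
 * 1500 bytes is clamped up to GOOD_PACKET_LEN (1518), giving
 * ALIGN(12 + 1518, 64) = 1536 bytes.
 */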
854 static unsigned int get_mergeable_buf_len(struct ewma_pkt_len *avg_pkt_len)
855 {
856 	const size_t hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
857 	unsigned int len;
858 
859 	len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len),
860 			GOOD_PACKET_LEN, PAGE_SIZE - hdr_len);
861 	return ALIGN(len, MERGEABLE_BUFFER_ALIGN);
862 }
863 
864 static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
865 {
866 	struct page_frag *alloc_frag = &rq->alloc_frag;
867 	char *buf;
868 	unsigned long ctx;
869 	int err;
870 	unsigned int len, hole;
871 
872 	len = get_mergeable_buf_len(&rq->mrg_avg_pkt_len);
873 	if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
874 		return -ENOMEM;
875 
876 	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
877 	ctx = mergeable_buf_to_ctx(buf, len);
878 	get_page(alloc_frag->page);
879 	alloc_frag->offset += len;
880 	hole = alloc_frag->size - alloc_frag->offset;
881 	if (hole < len) {
882 		/* To avoid internal fragmentation, if it is very likely that there is
883 		 * not enough space for another buffer, add the remaining space to
884 		 * the current buffer. This extra space is not included in
885 		 * the truesize stored in ctx.
886 		 */
887 		len += hole;
888 		alloc_frag->offset += hole;
889 	}
890 
891 	sg_init_one(rq->sg, buf, len);
892 	err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, (void *)ctx, gfp);
893 	if (err < 0)
894 		put_page(virt_to_head_page(buf));
895 
896 	return err;
897 }
898 
899 /*
900  * Returns false if we couldn't fill entirely (OOM).
901  *
902  * Normally run in the receive path, but can also be run from ndo_open
903  * before we're receiving packets, or from refill_work which is
904  * careful to disable receiving (using napi_disable).
905  */
906 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
907 			  gfp_t gfp)
908 {
909 	int err;
910 	bool oom;
911 
912 	gfp |= __GFP_COLD;
913 	do {
914 		if (vi->mergeable_rx_bufs)
915 			err = add_recvbuf_mergeable(rq, gfp);
916 		else if (vi->big_packets)
917 			err = add_recvbuf_big(vi, rq, gfp);
918 		else
919 			err = add_recvbuf_small(vi, rq, gfp);
920 
921 		oom = err == -ENOMEM;
922 		if (err)
923 			break;
924 	} while (rq->vq->num_free);
925 	virtqueue_kick(rq->vq);
926 	return !oom;
927 }
928 
929 static void skb_recv_done(struct virtqueue *rvq)
930 {
931 	struct virtnet_info *vi = rvq->vdev->priv;
932 	struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
933 
934 	/* Schedule NAPI; suppress further interrupts if successful. */
935 	if (napi_schedule_prep(&rq->napi)) {
936 		virtqueue_disable_cb(rvq);
937 		__napi_schedule(&rq->napi);
938 	}
939 }
940 
941 static void virtnet_napi_enable(struct receive_queue *rq)
942 {
943 	napi_enable(&rq->napi);
944 
945 	/* If all buffers were filled by the other side before we enabled NAPI,
946 	 * we won't get another interrupt, so process any outstanding packets
947 	 * now.  virtnet_poll wants to re-enable the queue, so we disable it here.
948 	 * We synchronize against interrupts via NAPI_STATE_SCHED. */
949 	if (napi_schedule_prep(&rq->napi)) {
950 		virtqueue_disable_cb(rq->vq);
951 		local_bh_disable();
952 		__napi_schedule(&rq->napi);
953 		local_bh_enable();
954 	}
955 }
956 
957 static void refill_work(struct work_struct *work)
958 {
959 	struct virtnet_info *vi =
960 		container_of(work, struct virtnet_info, refill.work);
961 	bool still_empty;
962 	int i;
963 
964 	for (i = 0; i < vi->curr_queue_pairs; i++) {
965 		struct receive_queue *rq = &vi->rq[i];
966 
967 		napi_disable(&rq->napi);
968 		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
969 		virtnet_napi_enable(rq);
970 
971 		/* In theory, this can happen: if we don't get any buffers in
972 		 * we will *never* try to fill again.
973 		 */
974 		if (still_empty)
975 			schedule_delayed_work(&vi->refill, HZ/2);
976 	}
977 }
978 
979 static int virtnet_receive(struct receive_queue *rq, int budget)
980 {
981 	struct virtnet_info *vi = rq->vq->vdev->priv;
982 	unsigned int len, received = 0;
983 	void *buf;
984 
985 	while (received < budget &&
986 	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
987 		receive_buf(vi, rq, buf, len);
988 		received++;
989 	}
990 
991 	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
992 		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
993 			schedule_delayed_work(&vi->refill, 0);
994 	}
995 
996 	return received;
997 }
998 
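/* NAPI poll handler.  When the budget is not exhausted we re-enable virtqueue
 * callbacks with virtqueue_enable_cb_prepare(), complete NAPI, and then use
 * virtqueue_poll() to catch buffers that raced in meanwhile; if any did, we
 * reschedule ourselves rather than risk a missed interrupt.
 */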
999 static int virtnet_poll(struct napi_struct *napi, int budget)
1000 {
1001 	struct receive_queue *rq =
1002 		container_of(napi, struct receive_queue, napi);
1003 	unsigned int r, received;
1004 
1005 	received = virtnet_receive(rq, budget);
1006 
1007 	/* Out of packets? */
1008 	if (received < budget) {
1009 		r = virtqueue_enable_cb_prepare(rq->vq);
1010 		napi_complete_done(napi, received);
1011 		if (unlikely(virtqueue_poll(rq->vq, r)) &&
1012 		    napi_schedule_prep(napi)) {
1013 			virtqueue_disable_cb(rq->vq);
1014 			__napi_schedule(napi);
1015 		}
1016 	}
1017 
1018 	return received;
1019 }
1020 
1021 #ifdef CONFIG_NET_RX_BUSY_POLL
1022 /* must be called with local_bh_disable()d */
1023 static int virtnet_busy_poll(struct napi_struct *napi)
1024 {
1025 	struct receive_queue *rq =
1026 		container_of(napi, struct receive_queue, napi);
1027 	struct virtnet_info *vi = rq->vq->vdev->priv;
1028 	int r, received = 0, budget = 4;
1029 
1030 	if (!(vi->status & VIRTIO_NET_S_LINK_UP))
1031 		return LL_FLUSH_FAILED;
1032 
1033 	if (!napi_schedule_prep(napi))
1034 		return LL_FLUSH_BUSY;
1035 
1036 	virtqueue_disable_cb(rq->vq);
1037 
1038 again:
1039 	received += virtnet_receive(rq, budget);
1040 
1041 	r = virtqueue_enable_cb_prepare(rq->vq);
1042 	clear_bit(NAPI_STATE_SCHED, &napi->state);
1043 	if (unlikely(virtqueue_poll(rq->vq, r)) &&
1044 	    napi_schedule_prep(napi)) {
1045 		virtqueue_disable_cb(rq->vq);
1046 		if (received < budget) {
1047 			budget -= received;
1048 			goto again;
1049 		} else {
1050 			__napi_schedule(napi);
1051 		}
1052 	}
1053 
1054 	return received;
1055 }
1056 #endif	/* CONFIG_NET_RX_BUSY_POLL */
1057 
1058 static int virtnet_open(struct net_device *dev)
1059 {
1060 	struct virtnet_info *vi = netdev_priv(dev);
1061 	int i;
1062 
1063 	for (i = 0; i < vi->max_queue_pairs; i++) {
1064 		if (i < vi->curr_queue_pairs)
1065 			/* Make sure we have some buffers: if OOM, use the workqueue. */
1066 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
1067 				schedule_delayed_work(&vi->refill, 0);
1068 		virtnet_napi_enable(&vi->rq[i]);
1069 	}
1070 
1071 	return 0;
1072 }
1073 
1074 static void free_old_xmit_skbs(struct send_queue *sq)
1075 {
1076 	struct sk_buff *skb;
1077 	unsigned int len;
1078 	struct virtnet_info *vi = sq->vq->vdev->priv;
1079 	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
1080 
1081 	while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) {
1082 		pr_debug("Sent skb %p\n", skb);
1083 
1084 		u64_stats_update_begin(&stats->tx_syncp);
1085 		stats->tx_bytes += skb->len;
1086 		stats->tx_packets++;
1087 		u64_stats_update_end(&stats->tx_syncp);
1088 
1089 		dev_kfree_skb_any(skb);
1090 	}
1091 }
1092 
1093 static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
1094 {
1095 	struct virtio_net_hdr_mrg_rxbuf *hdr;
1096 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
1097 	struct virtnet_info *vi = sq->vq->vdev->priv;
1098 	unsigned num_sg;
1099 	unsigned hdr_len = vi->hdr_len;
1100 	bool can_push;
1101 
1102 	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
1103 
1104 	can_push = vi->any_header_sg &&
1105 		!((unsigned long)skb->data & (__alignof__(*hdr) - 1)) &&
1106 		!skb_header_cloned(skb) && skb_headroom(skb) >= hdr_len;
1107 	/* Even if we can, don't push here yet as this would skew
1108 	 * csum_start offset below. */
1109 	if (can_push)
1110 		hdr = (struct virtio_net_hdr_mrg_rxbuf *)(skb->data - hdr_len);
1111 	else
1112 		hdr = skb_vnet_hdr(skb);
1113 
1114 	if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
1115 				    virtio_is_little_endian(vi->vdev), false))
1116 		BUG();
1117 
1118 	if (vi->mergeable_rx_bufs)
1119 		hdr->num_buffers = 0;
1120 
1121 	sg_init_table(sq->sg, skb_shinfo(skb)->nr_frags + (can_push ? 1 : 2));
1122 	if (can_push) {
1123 		__skb_push(skb, hdr_len);
1124 		num_sg = skb_to_sgvec(skb, sq->sg, 0, skb->len);
1125 		/* Pull header back to avoid skew in tx bytes calculations. */
1126 		__skb_pull(skb, hdr_len);
1127 	} else {
1128 		sg_set_buf(sq->sg, hdr, hdr_len);
1129 		num_sg = skb_to_sgvec(skb, sq->sg + 1, 0, skb->len) + 1;
1130 	}
1131 	return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC);
1132 }
1133 
1134 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
1135 {
1136 	struct virtnet_info *vi = netdev_priv(dev);
1137 	int qnum = skb_get_queue_mapping(skb);
1138 	struct send_queue *sq = &vi->sq[qnum];
1139 	int err;
1140 	struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum);
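	/* With xmit_more set the stack has more packets queued for us, so
	 * defer the (relatively expensive) virtqueue kick until the end of
	 * the batch or until the queue is stopped.
	 */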
1141 	bool kick = !skb->xmit_more;
1142 
1143 	/* Free up any pending old buffers before queueing new ones. */
1144 	free_old_xmit_skbs(sq);
1145 
1146 	/* timestamp packet in software */
1147 	skb_tx_timestamp(skb);
1148 
1149 	/* Try to transmit */
1150 	err = xmit_skb(sq, skb);
1151 
1152 	/* This should not happen! */
1153 	if (unlikely(err)) {
1154 		dev->stats.tx_fifo_errors++;
1155 		if (net_ratelimit())
1156 			dev_warn(&dev->dev,
1157 				 "Unexpected TXQ (%d) queue failure: %d\n", qnum, err);
1158 		dev->stats.tx_dropped++;
1159 		dev_kfree_skb_any(skb);
1160 		return NETDEV_TX_OK;
1161 	}
1162 
1163 	/* Don't wait up for transmitted skbs to be freed. */
1164 	skb_orphan(skb);
1165 	nf_reset(skb);
1166 
1167 	/* If running out of space, stop queue to avoid getting packets that we
1168 	 * are then unable to transmit.
1169 	 * An alternative would be to force queuing layer to requeue the skb by
1170 	 * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be
1171 	 * returned in a normal path of operation: it means that driver is not
1172 	 * maintaining the TX queue stop/start state properly, and causes
1173 	 * the stack to do a non-trivial amount of useless work.
1174 	 * Since most packets only take 1 or 2 ring slots, stopping the queue
1175 	 * early means 16 slots are typically wasted.
1176 	 */
1177 	if (sq->vq->num_free < 2+MAX_SKB_FRAGS) {
1178 		netif_stop_subqueue(dev, qnum);
1179 		if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) {
1180 			/* More just got used, free them then recheck. */
1181 			free_old_xmit_skbs(sq);
1182 			if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) {
1183 				netif_start_subqueue(dev, qnum);
1184 				virtqueue_disable_cb(sq->vq);
1185 			}
1186 		}
1187 	}
1188 
1189 	if (kick || netif_xmit_stopped(txq))
1190 		virtqueue_kick(sq->vq);
1191 
1192 	return NETDEV_TX_OK;
1193 }
1194 
1195 /*
1196  * Send command via the control virtqueue and check status.  Commands
1197  * supported by the hypervisor, as indicated by feature bits, should
1198  * never fail unless improperly formatted.
1199  */
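/* Note: the command header, payload and status buffers live in struct
 * virtnet_info (ctrl_hdr, ctrl_status, ...) rather than on the stack, so
 * they can safely be handed to the virtqueue while we spin for the reply
 * (stack memory may not be usable for DMA); as noted at the struct
 * definition, the rtnl lock serializes their use.
 */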
1200 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
1201 				 struct scatterlist *out)
1202 {
1203 	struct scatterlist *sgs[4], hdr, stat;
1204 	unsigned out_num = 0, tmp;
1205 
1206 	/* Caller should know better */
1207 	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
1208 
1209 	vi->ctrl_status = ~0;
1210 	vi->ctrl_hdr.class = class;
1211 	vi->ctrl_hdr.cmd = cmd;
1212 	/* Add header */
1213 	sg_init_one(&hdr, &vi->ctrl_hdr, sizeof(vi->ctrl_hdr));
1214 	sgs[out_num++] = &hdr;
1215 
1216 	if (out)
1217 		sgs[out_num++] = out;
1218 
1219 	/* Add return status. */
1220 	sg_init_one(&stat, &vi->ctrl_status, sizeof(vi->ctrl_status));
1221 	sgs[out_num] = &stat;
1222 
1223 	BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
1224 	virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
1225 
1226 	if (unlikely(!virtqueue_kick(vi->cvq)))
1227 		return vi->ctrl_status == VIRTIO_NET_OK;
1228 
1229 	/* Spin for a response; the kick causes an ioport write, trapping
1230 	 * into the hypervisor, so the request should be handled immediately.
1231 	 */
1232 	while (!virtqueue_get_buf(vi->cvq, &tmp) &&
1233 	       !virtqueue_is_broken(vi->cvq))
1234 		cpu_relax();
1235 
1236 	return vi->ctrl_status == VIRTIO_NET_OK;
1237 }
1238 
1239 static int virtnet_set_mac_address(struct net_device *dev, void *p)
1240 {
1241 	struct virtnet_info *vi = netdev_priv(dev);
1242 	struct virtio_device *vdev = vi->vdev;
1243 	int ret;
1244 	struct sockaddr *addr;
1245 	struct scatterlist sg;
1246 
1247 	addr = kmalloc(sizeof(*addr), GFP_KERNEL);
1248 	if (!addr)
1249 		return -ENOMEM;
1250 	memcpy(addr, p, sizeof(*addr));
1251 
1252 	ret = eth_prepare_mac_addr_change(dev, addr);
1253 	if (ret)
1254 		goto out;
1255 
1256 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
1257 		sg_init_one(&sg, addr->sa_data, dev->addr_len);
1258 		if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1259 					  VIRTIO_NET_CTRL_MAC_ADDR_SET, &sg)) {
1260 			dev_warn(&vdev->dev,
1261 				 "Failed to set mac address by vq command.\n");
1262 			ret = -EINVAL;
1263 			goto out;
1264 		}
1265 	} else if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC) &&
1266 		   !virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
1267 		unsigned int i;
1268 
1269 		/* Naturally, this has an atomicity problem. */
1270 		for (i = 0; i < dev->addr_len; i++)
1271 			virtio_cwrite8(vdev,
1272 				       offsetof(struct virtio_net_config, mac) +
1273 				       i, addr->sa_data[i]);
1274 	}
1275 
1276 	eth_commit_mac_addr_change(dev, p);
1277 	ret = 0;
1278 
1279 out:
1280 	kfree(addr);
1281 	return ret;
1282 }
1283 
1284 static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
1285 					       struct rtnl_link_stats64 *tot)
1286 {
1287 	struct virtnet_info *vi = netdev_priv(dev);
1288 	int cpu;
1289 	unsigned int start;
1290 
1291 	for_each_possible_cpu(cpu) {
1292 		struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
1293 		u64 tpackets, tbytes, rpackets, rbytes;
1294 
1295 		do {
1296 			start = u64_stats_fetch_begin_irq(&stats->tx_syncp);
1297 			tpackets = stats->tx_packets;
1298 			tbytes   = stats->tx_bytes;
1299 		} while (u64_stats_fetch_retry_irq(&stats->tx_syncp, start));
1300 
1301 		do {
1302 			start = u64_stats_fetch_begin_irq(&stats->rx_syncp);
1303 			rpackets = stats->rx_packets;
1304 			rbytes   = stats->rx_bytes;
1305 		} while (u64_stats_fetch_retry_irq(&stats->rx_syncp, start));
1306 
1307 		tot->rx_packets += rpackets;
1308 		tot->tx_packets += tpackets;
1309 		tot->rx_bytes   += rbytes;
1310 		tot->tx_bytes   += tbytes;
1311 	}
1312 
1313 	tot->tx_dropped = dev->stats.tx_dropped;
1314 	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
1315 	tot->rx_dropped = dev->stats.rx_dropped;
1316 	tot->rx_length_errors = dev->stats.rx_length_errors;
1317 	tot->rx_frame_errors = dev->stats.rx_frame_errors;
1318 
1319 	return tot;
1320 }
1321 
1322 #ifdef CONFIG_NET_POLL_CONTROLLER
1323 static void virtnet_netpoll(struct net_device *dev)
1324 {
1325 	struct virtnet_info *vi = netdev_priv(dev);
1326 	int i;
1327 
1328 	for (i = 0; i < vi->curr_queue_pairs; i++)
1329 		napi_schedule(&vi->rq[i].napi);
1330 }
1331 #endif
1332 
1333 static void virtnet_ack_link_announce(struct virtnet_info *vi)
1334 {
1335 	rtnl_lock();
1336 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
1337 				  VIRTIO_NET_CTRL_ANNOUNCE_ACK, NULL))
1338 		dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
1339 	rtnl_unlock();
1340 }
1341 
1342 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1343 {
1344 	struct scatterlist sg;
1345 	struct net_device *dev = vi->dev;
1346 
1347 	if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
1348 		return 0;
1349 
1350 	vi->ctrl_mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
1351 	sg_init_one(&sg, &vi->ctrl_mq, sizeof(vi->ctrl_mq));
1352 
1353 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
1354 				  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg)) {
1355 		dev_warn(&dev->dev, "Failed to set the number of queue pairs to %d\n",
1356 			 queue_pairs);
1357 		return -EINVAL;
1358 	} else {
1359 		vi->curr_queue_pairs = queue_pairs;
1360 		/* virtnet_open() will refill when the device is brought up. */
1361 		if (dev->flags & IFF_UP)
1362 			schedule_delayed_work(&vi->refill, 0);
1363 	}
1364 
1365 	return 0;
1366 }
1367 
1368 static int virtnet_close(struct net_device *dev)
1369 {
1370 	struct virtnet_info *vi = netdev_priv(dev);
1371 	int i;
1372 
1373 	/* Make sure refill_work doesn't re-enable napi! */
1374 	cancel_delayed_work_sync(&vi->refill);
1375 
1376 	for (i = 0; i < vi->max_queue_pairs; i++)
1377 		napi_disable(&vi->rq[i].napi);
1378 
1379 	return 0;
1380 }
1381 
1382 static void virtnet_set_rx_mode(struct net_device *dev)
1383 {
1384 	struct virtnet_info *vi = netdev_priv(dev);
1385 	struct scatterlist sg[2];
1386 	struct virtio_net_ctrl_mac *mac_data;
1387 	struct netdev_hw_addr *ha;
1388 	int uc_count;
1389 	int mc_count;
1390 	void *buf;
1391 	int i;
1392 
1393 	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
1394 	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
1395 		return;
1396 
1397 	vi->ctrl_promisc = ((dev->flags & IFF_PROMISC) != 0);
1398 	vi->ctrl_allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
1399 
1400 	sg_init_one(sg, &vi->ctrl_promisc, sizeof(vi->ctrl_promisc));
1401 
1402 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1403 				  VIRTIO_NET_CTRL_RX_PROMISC, sg))
1404 		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
1405 			 vi->ctrl_promisc ? "en" : "dis");
1406 
1407 	sg_init_one(sg, &vi->ctrl_allmulti, sizeof(vi->ctrl_allmulti));
1408 
1409 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1410 				  VIRTIO_NET_CTRL_RX_ALLMULTI, sg))
1411 		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
1412 			 vi->ctrl_allmulti ? "en" : "dis");
1413 
1414 	uc_count = netdev_uc_count(dev);
1415 	mc_count = netdev_mc_count(dev);
1416 	/* MAC filter - use one buffer for both lists */
1417 	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
1418 		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
1419 	mac_data = buf;
1420 	if (!buf)
1421 		return;
1422 
1423 	sg_init_table(sg, 2);
1424 
1425 	/* Store the unicast list and count in the front of the buffer */
1426 	mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
1427 	i = 0;
1428 	netdev_for_each_uc_addr(ha, dev)
1429 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
1430 
1431 	sg_set_buf(&sg[0], mac_data,
1432 		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
1433 
1434 	/* multicast list and count fill the end */
1435 	mac_data = (void *)&mac_data->macs[uc_count][0];
1436 
1437 	mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
1438 	i = 0;
1439 	netdev_for_each_mc_addr(ha, dev)
1440 		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
1441 
1442 	sg_set_buf(&sg[1], mac_data,
1443 		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
1444 
1445 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1446 				  VIRTIO_NET_CTRL_MAC_TABLE_SET, sg))
1447 		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");
1448 
1449 	kfree(buf);
1450 }
1451 
1452 static int virtnet_vlan_rx_add_vid(struct net_device *dev,
1453 				   __be16 proto, u16 vid)
1454 {
1455 	struct virtnet_info *vi = netdev_priv(dev);
1456 	struct scatterlist sg;
1457 
1458 	vi->ctrl_vid = vid;
1459 	sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
1460 
1461 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1462 				  VIRTIO_NET_CTRL_VLAN_ADD, &sg))
1463 		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
1464 	return 0;
1465 }
1466 
1467 static int virtnet_vlan_rx_kill_vid(struct net_device *dev,
1468 				    __be16 proto, u16 vid)
1469 {
1470 	struct virtnet_info *vi = netdev_priv(dev);
1471 	struct scatterlist sg;
1472 
1473 	vi->ctrl_vid = vid;
1474 	sg_init_one(&sg, &vi->ctrl_vid, sizeof(vi->ctrl_vid));
1475 
1476 	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
1477 				  VIRTIO_NET_CTRL_VLAN_DEL, &sg))
1478 		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
1479 	return 0;
1480 }
1481 
1482 static void virtnet_clean_affinity(struct virtnet_info *vi, long hcpu)
1483 {
1484 	int i;
1485 
1486 	if (vi->affinity_hint_set) {
1487 		for (i = 0; i < vi->max_queue_pairs; i++) {
1488 			virtqueue_set_affinity(vi->rq[i].vq, -1);
1489 			virtqueue_set_affinity(vi->sq[i].vq, -1);
1490 		}
1491 
1492 		vi->affinity_hint_set = false;
1493 	}
1494 }
1495 
1496 static void virtnet_set_affinity(struct virtnet_info *vi)
1497 {
1498 	int i;
1499 	int cpu;
1500 
1501 	/* In multiqueue mode, when the number of CPUs is equal to the number of
1502 	 * queue pairs, we let each queue pair be private to one CPU by
1503 	 * setting the affinity hint, eliminating the contention.
1504 	 */
1505 	if (vi->curr_queue_pairs == 1 ||
1506 	    vi->max_queue_pairs != num_online_cpus()) {
1507 		virtnet_clean_affinity(vi, -1);
1508 		return;
1509 	}
1510 
1511 	i = 0;
1512 	for_each_online_cpu(cpu) {
1513 		virtqueue_set_affinity(vi->rq[i].vq, cpu);
1514 		virtqueue_set_affinity(vi->sq[i].vq, cpu);
1515 		netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
1516 		i++;
1517 	}
1518 
1519 	vi->affinity_hint_set = true;
1520 }
1521 
1522 static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node)
1523 {
1524 	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
1525 						   node);
1526 	virtnet_set_affinity(vi);
1527 	return 0;
1528 }
1529 
1530 static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
1531 {
1532 	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
1533 						   node_dead);
1534 	virtnet_set_affinity(vi);
1535 	return 0;
1536 }
1537 
1538 static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node)
1539 {
1540 	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
1541 						   node);
1542 
1543 	virtnet_clean_affinity(vi, cpu);
1544 	return 0;
1545 }
1546 
1547 static enum cpuhp_state virtionet_online;
1548 
1549 static int virtnet_cpu_notif_add(struct virtnet_info *vi)
1550 {
1551 	int ret;
1552 
1553 	ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
1554 	if (ret)
1555 		return ret;
1556 	ret = cpuhp_state_add_instance_nocalls(CPUHP_VIRT_NET_DEAD,
1557 					       &vi->node_dead);
1558 	if (!ret)
1559 		return ret;
1560 	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
1561 	return ret;
1562 }
1563 
1564 static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
1565 {
1566 	cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
1567 	cpuhp_state_remove_instance_nocalls(CPUHP_VIRT_NET_DEAD,
1568 					    &vi->node_dead);
1569 }
1570 
1571 static void virtnet_get_ringparam(struct net_device *dev,
1572 				struct ethtool_ringparam *ring)
1573 {
1574 	struct virtnet_info *vi = netdev_priv(dev);
1575 
1576 	ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
1577 	ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
1578 	ring->rx_pending = ring->rx_max_pending;
1579 	ring->tx_pending = ring->tx_max_pending;
1580 }
1581 
1582 
1583 static void virtnet_get_drvinfo(struct net_device *dev,
1584 				struct ethtool_drvinfo *info)
1585 {
1586 	struct virtnet_info *vi = netdev_priv(dev);
1587 	struct virtio_device *vdev = vi->vdev;
1588 
1589 	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1590 	strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
1591 	strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
1592 
1593 }
1594 
1595 /* TODO: Eliminate out-of-order (OOO) packets during switching */
1596 static int virtnet_set_channels(struct net_device *dev,
1597 				struct ethtool_channels *channels)
1598 {
1599 	struct virtnet_info *vi = netdev_priv(dev);
1600 	u16 queue_pairs = channels->combined_count;
1601 	int err;
1602 
1603 	/* We don't support separate rx/tx channels.
1604 	 * We don't allow setting 'other' channels.
1605 	 */
1606 	if (channels->rx_count || channels->tx_count || channels->other_count)
1607 		return -EINVAL;
1608 
1609 	if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
1610 		return -EINVAL;
1611 
1612 	/* For now we don't support modifying channels while XDP is loaded.
1613 	 * Also, when XDP is loaded all RX queues have XDP programs, so we only
1614 	 * need to check a single RX queue.
1615 	 */
1616 	if (vi->rq[0].xdp_prog)
1617 		return -EINVAL;
1618 
1619 	get_online_cpus();
1620 	err = virtnet_set_queues(vi, queue_pairs);
1621 	if (!err) {
1622 		netif_set_real_num_tx_queues(dev, queue_pairs);
1623 		netif_set_real_num_rx_queues(dev, queue_pairs);
1624 
1625 		virtnet_set_affinity(vi);
1626 	}
1627 	put_online_cpus();
1628 
1629 	return err;
1630 }
1631 
1632 static void virtnet_get_channels(struct net_device *dev,
1633 				 struct ethtool_channels *channels)
1634 {
1635 	struct virtnet_info *vi = netdev_priv(dev);
1636 
1637 	channels->combined_count = vi->curr_queue_pairs;
1638 	channels->max_combined = vi->max_queue_pairs;
1639 	channels->max_other = 0;
1640 	channels->rx_count = 0;
1641 	channels->tx_count = 0;
1642 	channels->other_count = 0;
1643 }
1644 
1645 /* Check if the user is trying to change anything besides speed/duplex */
1646 static bool virtnet_validate_ethtool_cmd(const struct ethtool_cmd *cmd)
1647 {
1648 	struct ethtool_cmd diff1 = *cmd;
1649 	struct ethtool_cmd diff2 = {};
1650 
1651 	/* cmd is always set, so we need to clear it; validate the port type,
1652 	 * and since there is no autonegotiation we can ignore advertising.
1653 	 */
1654 	ethtool_cmd_speed_set(&diff1, 0);
1655 	diff2.port = PORT_OTHER;
1656 	diff1.advertising = 0;
1657 	diff1.duplex = 0;
1658 	diff1.cmd = 0;
1659 
1660 	return !memcmp(&diff1, &diff2, sizeof(diff1));
1661 }
1662 
1663 static int virtnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1664 {
1665 	struct virtnet_info *vi = netdev_priv(dev);
1666 	u32 speed;
1667 
1668 	speed = ethtool_cmd_speed(cmd);
1669 	/* don't allow custom speed and duplex */
1670 	if (!ethtool_validate_speed(speed) ||
1671 	    !ethtool_validate_duplex(cmd->duplex) ||
1672 	    !virtnet_validate_ethtool_cmd(cmd))
1673 		return -EINVAL;
1674 	vi->speed = speed;
1675 	vi->duplex = cmd->duplex;
1676 
1677 	return 0;
1678 }
1679 
1680 static int virtnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1681 {
1682 	struct virtnet_info *vi = netdev_priv(dev);
1683 
1684 	ethtool_cmd_speed_set(cmd, vi->speed);
1685 	cmd->duplex = vi->duplex;
1686 	cmd->port = PORT_OTHER;
1687 
1688 	return 0;
1689 }
1690 
1691 static void virtnet_init_settings(struct net_device *dev)
1692 {
1693 	struct virtnet_info *vi = netdev_priv(dev);
1694 
1695 	vi->speed = SPEED_UNKNOWN;
1696 	vi->duplex = DUPLEX_UNKNOWN;
1697 }
1698 
1699 static const struct ethtool_ops virtnet_ethtool_ops = {
1700 	.get_drvinfo = virtnet_get_drvinfo,
1701 	.get_link = ethtool_op_get_link,
1702 	.get_ringparam = virtnet_get_ringparam,
1703 	.set_channels = virtnet_set_channels,
1704 	.get_channels = virtnet_get_channels,
1705 	.get_ts_info = ethtool_op_get_ts_info,
1706 	.get_settings = virtnet_get_settings,
1707 	.set_settings = virtnet_set_settings,
1708 };
1709 
1710 static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
1711 {
1712 	unsigned long int max_sz = PAGE_SIZE - sizeof(struct padded_vnet_hdr);
1713 	struct virtnet_info *vi = netdev_priv(dev);
1714 	struct bpf_prog *old_prog;
1715 	u16 xdp_qp = 0, curr_qp;
1716 	int i, err;
1717 
1718 	if (prog && prog->xdp_adjust_head) {
1719 		netdev_warn(dev, "Does not support bpf_xdp_adjust_head()\n");
1720 		return -EOPNOTSUPP;
1721 	}
1722 
1723 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
1724 	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
1725 	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
1726 	    virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO)) {
1727 		netdev_warn(dev, "can't set XDP while host is implementing LRO, disable LRO first\n");
1728 		return -EOPNOTSUPP;
1729 	}
1730 
1731 	if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
1732 		netdev_warn(dev, "XDP expects header/data in single page, any_header_sg required\n");
1733 		return -EINVAL;
1734 	}
1735 
1736 	if (dev->mtu > max_sz) {
1737 		netdev_warn(dev, "XDP requires MTU less than %lu\n", max_sz);
1738 		return -EINVAL;
1739 	}
1740 
1741 	curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
1742 	if (prog)
1743 		xdp_qp = nr_cpu_ids;
1744 
1745 	/* XDP requires extra queues for XDP_TX */
1746 	if (curr_qp + xdp_qp > vi->max_queue_pairs) {
1747 		netdev_warn(dev, "request %i queues but max is %i\n",
1748 			    curr_qp + xdp_qp, vi->max_queue_pairs);
1749 		return -ENOMEM;
1750 	}
1751 
1752 	err = virtnet_set_queues(vi, curr_qp + xdp_qp);
1753 	if (err) {
1754 		dev_warn(&dev->dev, "XDP Device queue allocation failure.\n");
1755 		return err;
1756 	}
1757 
1758 	if (prog) {
1759 		prog = bpf_prog_add(prog, vi->max_queue_pairs - 1);
1760 		if (IS_ERR(prog)) {
1761 			virtnet_set_queues(vi, curr_qp);
1762 			return PTR_ERR(prog);
1763 		}
1764 	}
1765 
1766 	vi->xdp_queue_pairs = xdp_qp;
1767 	netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp);
1768 
1769 	for (i = 0; i < vi->max_queue_pairs; i++) {
1770 		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
1771 		rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
1772 		if (old_prog)
1773 			bpf_prog_put(old_prog);
1774 	}
1775 
1776 	return 0;
1777 }
1778 
1779 static bool virtnet_xdp_query(struct net_device *dev)
1780 {
1781 	struct virtnet_info *vi = netdev_priv(dev);
1782 	int i;
1783 
1784 	for (i = 0; i < vi->max_queue_pairs; i++) {
1785 		if (vi->rq[i].xdp_prog)
1786 			return true;
1787 	}
1788 	return false;
1789 }
1790 
1791 static int virtnet_xdp(struct net_device *dev, struct netdev_xdp *xdp)
1792 {
1793 	switch (xdp->command) {
1794 	case XDP_SETUP_PROG:
1795 		return virtnet_xdp_set(dev, xdp->prog);
1796 	case XDP_QUERY_PROG:
1797 		xdp->prog_attached = virtnet_xdp_query(dev);
1798 		return 0;
1799 	default:
1800 		return -EINVAL;
1801 	}
1802 }
1803 
1804 static const struct net_device_ops virtnet_netdev = {
1805 	.ndo_open            = virtnet_open,
1806 	.ndo_stop   	     = virtnet_close,
1807 	.ndo_start_xmit      = start_xmit,
1808 	.ndo_validate_addr   = eth_validate_addr,
1809 	.ndo_set_mac_address = virtnet_set_mac_address,
1810 	.ndo_set_rx_mode     = virtnet_set_rx_mode,
1811 	.ndo_get_stats64     = virtnet_stats,
1812 	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
1813 	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
1814 #ifdef CONFIG_NET_POLL_CONTROLLER
1815 	.ndo_poll_controller = virtnet_netpoll,
1816 #endif
1817 #ifdef CONFIG_NET_RX_BUSY_POLL
1818 	.ndo_busy_poll		= virtnet_busy_poll,
1819 #endif
1820 	.ndo_xdp		= virtnet_xdp,
1821 };
1822 
1823 static void virtnet_config_changed_work(struct work_struct *work)
1824 {
1825 	struct virtnet_info *vi =
1826 		container_of(work, struct virtnet_info, config_work);
1827 	u16 v;
1828 
1829 	if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
1830 				 struct virtio_net_config, status, &v) < 0)
1831 		return;
1832 
1833 	if (v & VIRTIO_NET_S_ANNOUNCE) {
1834 		netdev_notify_peers(vi->dev);
1835 		virtnet_ack_link_announce(vi);
1836 	}
1837 
1838 	/* Ignore unknown (future) status bits */
1839 	v &= VIRTIO_NET_S_LINK_UP;
1840 
1841 	if (vi->status == v)
1842 		return;
1843 
1844 	vi->status = v;
1845 
1846 	if (vi->status & VIRTIO_NET_S_LINK_UP) {
1847 		netif_carrier_on(vi->dev);
1848 		netif_tx_wake_all_queues(vi->dev);
1849 	} else {
1850 		netif_carrier_off(vi->dev);
1851 		netif_tx_stop_all_queues(vi->dev);
1852 	}
1853 }
1854 
1855 static void virtnet_config_changed(struct virtio_device *vdev)
1856 {
1857 	struct virtnet_info *vi = vdev->priv;
1858 
1859 	schedule_work(&vi->config_work);
1860 }
1861 
1862 static void virtnet_free_queues(struct virtnet_info *vi)
1863 {
1864 	int i;
1865 
1866 	for (i = 0; i < vi->max_queue_pairs; i++) {
1867 		napi_hash_del(&vi->rq[i].napi);
1868 		netif_napi_del(&vi->rq[i].napi);
1869 	}
1870 
1871 	/* We called napi_hash_del() before netif_napi_del(),
1872 	 * so we need to respect an RCU grace period before freeing vi->rq.
1873 	 */
1874 	synchronize_net();
1875 
1876 	kfree(vi->rq);
1877 	kfree(vi->sq);
1878 }
1879 
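/* Free the per-queue page chains and drop any attached XDP programs. */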
1880 static void free_receive_bufs(struct virtnet_info *vi)
1881 {
1882 	struct bpf_prog *old_prog;
1883 	int i;
1884 
1885 	rtnl_lock();
1886 	for (i = 0; i < vi->max_queue_pairs; i++) {
1887 		while (vi->rq[i].pages)
1888 			__free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
1889 
1890 		old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
1891 		RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
1892 		if (old_prog)
1893 			bpf_prog_put(old_prog);
1894 	}
1895 	rtnl_unlock();
1896 }
1897 
1898 static void free_receive_page_frags(struct virtnet_info *vi)
1899 {
1900 	int i;
1901 	for (i = 0; i < vi->max_queue_pairs; i++)
1902 		if (vi->rq[i].alloc_frag.page)
1903 			put_page(vi->rq[i].alloc_frag.page);
1904 }
1905 
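/* Send queues in the XDP_TX range carry raw page buffers instead of skbs;
 * free_unused_bufs() uses this to pick the right release primitive.
 */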
1906 static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
1907 {
1908 	/* For small receive mode always use kfree_skb variants */
1909 	/* For small receive mode, always use the kfree_skb variants */
1910 		return false;
1911 
1912 	if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
1913 		return false;
1914 	else if (q < vi->curr_queue_pairs)
1915 		return true;
1916 	else
1917 		return false;
1918 }
1919 
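/* Detach and free any buffers left in the virtqueues, honouring how each
 * queue's buffers were allocated.
 */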
1920 static void free_unused_bufs(struct virtnet_info *vi)
1921 {
1922 	void *buf;
1923 	int i;
1924 
1925 	for (i = 0; i < vi->max_queue_pairs; i++) {
1926 		struct virtqueue *vq = vi->sq[i].vq;
1927 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
1928 			if (!is_xdp_raw_buffer_queue(vi, i))
1929 				dev_kfree_skb(buf);
1930 			else
1931 				put_page(virt_to_head_page(buf));
1932 		}
1933 	}
1934 
1935 	for (i = 0; i < vi->max_queue_pairs; i++) {
1936 		struct virtqueue *vq = vi->rq[i].vq;
1937 
1938 		while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
1939 			if (vi->mergeable_rx_bufs) {
1940 				unsigned long ctx = (unsigned long)buf;
1941 				void *base = mergeable_ctx_to_buf_address(ctx);
1942 				put_page(virt_to_head_page(base));
1943 			} else if (vi->big_packets) {
1944 				give_pages(&vi->rq[i], buf);
1945 			} else {
1946 				dev_kfree_skb(buf);
1947 			}
1948 		}
1949 	}
1950 }
1951 
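/* Clear CPU affinity hints, delete the virtqueues and free the queues. */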
1952 static void virtnet_del_vqs(struct virtnet_info *vi)
1953 {
1954 	struct virtio_device *vdev = vi->vdev;
1955 
1956 	virtnet_clean_affinity(vi, -1);
1957 
1958 	vdev->config->del_vqs(vdev);
1959 
1960 	virtnet_free_queues(vi);
1961 }
1962 
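/* Create the virtqueues: an RX/TX pair per queue pair plus an optional
 * control vq, wiring up their callbacks and names.
 */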
1963 static int virtnet_find_vqs(struct virtnet_info *vi)
1964 {
1965 	vq_callback_t **callbacks;
1966 	struct virtqueue **vqs;
1967 	int ret = -ENOMEM;
1968 	int i, total_vqs;
1969 	const char **names;
1970 
1971 	/* We expect 1 RX virtqueue followed by 1 TX virtqueue, followed by
1972 	 * up to N-1 further RX/TX queue pairs used in multiqueue mode, followed
1973 	 * by an optional control vq.
1974 	 */
1975 	total_vqs = vi->max_queue_pairs * 2 +
1976 		    virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
1977 
1978 	/* Allocate space for find_vqs parameters */
1979 	vqs = kzalloc(total_vqs * sizeof(*vqs), GFP_KERNEL);
1980 	if (!vqs)
1981 		goto err_vq;
1982 	callbacks = kmalloc(total_vqs * sizeof(*callbacks), GFP_KERNEL);
1983 	if (!callbacks)
1984 		goto err_callback;
1985 	names = kmalloc(total_vqs * sizeof(*names), GFP_KERNEL);
1986 	if (!names)
1987 		goto err_names;
1988 
1989 	/* Parameters for control virtqueue, if any */
1990 	if (vi->has_cvq) {
1991 		callbacks[total_vqs - 1] = NULL;
1992 		names[total_vqs - 1] = "control";
1993 	}
1994 
1995 	/* Allocate/initialize parameters for send/receive virtqueues */
1996 	for (i = 0; i < vi->max_queue_pairs; i++) {
1997 		callbacks[rxq2vq(i)] = skb_recv_done;
1998 		callbacks[txq2vq(i)] = skb_xmit_done;
1999 		sprintf(vi->rq[i].name, "input.%d", i);
2000 		sprintf(vi->sq[i].name, "output.%d", i);
2001 		names[rxq2vq(i)] = vi->rq[i].name;
2002 		names[txq2vq(i)] = vi->sq[i].name;
2003 	}
2004 
2005 	ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
2006 					 names);
2007 	if (ret)
2008 		goto err_find;
2009 
2010 	if (vi->has_cvq) {
2011 		vi->cvq = vqs[total_vqs - 1];
2012 		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
2013 			vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2014 	}
2015 
2016 	for (i = 0; i < vi->max_queue_pairs; i++) {
2017 		vi->rq[i].vq = vqs[rxq2vq(i)];
2018 		vi->sq[i].vq = vqs[txq2vq(i)];
2019 	}
2020 
2021 	kfree(names);
2022 	kfree(callbacks);
2023 	kfree(vqs);
2024 
2025 	return 0;
2026 
2027 err_find:
2028 	kfree(names);
2029 err_names:
2030 	kfree(callbacks);
2031 err_callback:
2032 	kfree(vqs);
2033 err_vq:
2034 	return ret;
2035 }
2036 
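/* Allocate the send/receive queue arrays and initialize per-queue NAPI,
 * scatterlist and packet-length EWMA state.
 */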
2037 static int virtnet_alloc_queues(struct virtnet_info *vi)
2038 {
2039 	int i;
2040 
2041 	vi->sq = kzalloc(sizeof(*vi->sq) * vi->max_queue_pairs, GFP_KERNEL);
2042 	if (!vi->sq)
2043 		goto err_sq;
2044 	vi->rq = kzalloc(sizeof(*vi->rq) * vi->max_queue_pairs, GFP_KERNEL);
2045 	if (!vi->rq)
2046 		goto err_rq;
2047 
2048 	INIT_DELAYED_WORK(&vi->refill, refill_work);
2049 	for (i = 0; i < vi->max_queue_pairs; i++) {
2050 		vi->rq[i].pages = NULL;
2051 		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
2052 			       napi_weight);
2053 
2054 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
2055 		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
2056 		sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
2057 	}
2058 
2059 	return 0;
2060 
2061 err_rq:
2062 	kfree(vi->sq);
2063 err_sq:
2064 	return -ENOMEM;
2065 }
2066 
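/* Allocate queue state, create the virtqueues and set CPU affinity. */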
2067 static int init_vqs(struct virtnet_info *vi)
2068 {
2069 	int ret;
2070 
2071 	/* Allocate send & receive queues */
2072 	ret = virtnet_alloc_queues(vi);
2073 	if (ret)
2074 		goto err;
2075 
2076 	ret = virtnet_find_vqs(vi);
2077 	if (ret)
2078 		goto err_free;
2079 
2080 	get_online_cpus();
2081 	virtnet_set_affinity(vi);
2082 	put_online_cpus();
2083 
2084 	return 0;
2085 
2086 err_free:
2087 	virtnet_free_queues(vi);
2088 err:
2089 	return ret;
2090 }
2091 
2092 #ifdef CONFIG_SYSFS
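/* sysfs: report the mergeable receive buffer size currently chosen for this
 * RX queue, derived from its packet-length EWMA.
 */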
2093 static ssize_t mergeable_rx_buffer_size_show(struct netdev_rx_queue *queue,
2094 		struct rx_queue_attribute *attribute, char *buf)
2095 {
2096 	struct virtnet_info *vi = netdev_priv(queue->dev);
2097 	unsigned int queue_index = get_netdev_rx_queue_index(queue);
2098 	struct ewma_pkt_len *avg;
2099 
2100 	BUG_ON(queue_index >= vi->max_queue_pairs);
2101 	avg = &vi->rq[queue_index].mrg_avg_pkt_len;
2102 	return sprintf(buf, "%u\n", get_mergeable_buf_len(avg));
2103 }
2104 
2105 static struct rx_queue_attribute mergeable_rx_buffer_size_attribute =
2106 	__ATTR_RO(mergeable_rx_buffer_size);
2107 
2108 static struct attribute *virtio_net_mrg_rx_attrs[] = {
2109 	&mergeable_rx_buffer_size_attribute.attr,
2110 	NULL
2111 };
2112 
2113 static const struct attribute_group virtio_net_mrg_rx_group = {
2114 	.name = "virtio_net",
2115 	.attrs = virtio_net_mrg_rx_attrs
2116 };
2117 #endif
2118 
2119 static bool virtnet_fail_on_feature(struct virtio_device *vdev,
2120 				    unsigned int fbit,
2121 				    const char *fname, const char *dname)
2122 {
2123 	if (!virtio_has_feature(vdev, fbit))
2124 		return false;
2125 
2126 	dev_err(&vdev->dev, "device advertises feature %s but not %s",
2127 		fname, dname);
2128 
2129 	return true;
2130 }
2131 
2132 #define VIRTNET_FAIL_ON(vdev, fbit, dbit)			\
2133 	virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
2134 
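/* Features that rely on the control virtqueue are only valid if the device
 * also offers VIRTIO_NET_F_CTRL_VQ; reject inconsistent feature sets.
 */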
2135 static bool virtnet_validate_features(struct virtio_device *vdev)
2136 {
2137 	if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
2138 	    (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
2139 			     "VIRTIO_NET_F_CTRL_VQ") ||
2140 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
2141 			     "VIRTIO_NET_F_CTRL_VQ") ||
2142 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
2143 			     "VIRTIO_NET_F_CTRL_VQ") ||
2144 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
2145 	     VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
2146 			     "VIRTIO_NET_F_CTRL_VQ"))) {
2147 		return false;
2148 	}
2149 
2150 	return true;
2151 }
2152 
2153 #define MIN_MTU ETH_MIN_MTU
2154 #define MAX_MTU ETH_MAX_MTU
2155 
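/* Probe: validate features, build and configure the net_device, set up the
 * virtqueues and register the device with the networking core.
 */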
2156 static int virtnet_probe(struct virtio_device *vdev)
2157 {
2158 	int i, err;
2159 	struct net_device *dev;
2160 	struct virtnet_info *vi;
2161 	u16 max_queue_pairs;
2162 	int mtu;
2163 
2164 	if (!vdev->config->get) {
2165 		dev_err(&vdev->dev, "%s failure: config access disabled\n",
2166 			__func__);
2167 		return -EINVAL;
2168 	}
2169 
2170 	if (!virtnet_validate_features(vdev))
2171 		return -EINVAL;
2172 
2173 	/* Find out whether the host supports a multiqueue virtio_net device */
2174 	err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
2175 				   struct virtio_net_config,
2176 				   max_virtqueue_pairs, &max_queue_pairs);
2177 
2178 	/* We need at least 2 queues */
2179 	if (err || max_queue_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
2180 	    max_queue_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX ||
2181 	    !virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
2182 		max_queue_pairs = 1;
2183 
2184 	/* Allocate ourselves a network device with room for our info */
2185 	dev = alloc_etherdev_mq(sizeof(struct virtnet_info), max_queue_pairs);
2186 	if (!dev)
2187 		return -ENOMEM;
2188 
2189 	/* Set up network device as normal. */
2190 	dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
2191 	dev->netdev_ops = &virtnet_netdev;
2192 	dev->features = NETIF_F_HIGHDMA;
2193 
2194 	dev->ethtool_ops = &virtnet_ethtool_ops;
2195 	SET_NETDEV_DEV(dev, &vdev->dev);
2196 
2197 	/* Do we support "hardware" checksums? */
2198 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
2199 		/* This opens up the world of extra features. */
2200 		dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
2201 		if (csum)
2202 			dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
2203 
2204 		if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
2205 			dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
2206 				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
2207 		}
2208 		/* Individual feature bits: what can host handle? */
2209 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
2210 			dev->hw_features |= NETIF_F_TSO;
2211 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
2212 			dev->hw_features |= NETIF_F_TSO6;
2213 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
2214 			dev->hw_features |= NETIF_F_TSO_ECN;
2215 		if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
2216 			dev->hw_features |= NETIF_F_UFO;
2217 
2218 		dev->features |= NETIF_F_GSO_ROBUST;
2219 
2220 		if (gso)
2221 			dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
2222 		/* (!csum && gso) case will be fixed by register_netdev() */
2223 	}
2224 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
2225 		dev->features |= NETIF_F_RXCSUM;
2226 
2227 	dev->vlan_features = dev->features;
2228 
2229 	/* MTU range: 68 - 65535 */
2230 	dev->min_mtu = MIN_MTU;
2231 	dev->max_mtu = MAX_MTU;
2232 
2233 	/* Configuration may specify what MAC to use.  Otherwise random. */
2234 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
2235 		virtio_cread_bytes(vdev,
2236 				   offsetof(struct virtio_net_config, mac),
2237 				   dev->dev_addr, dev->addr_len);
2238 	else
2239 		eth_hw_addr_random(dev);
2240 
2241 	/* Set up our device-specific information */
2242 	vi = netdev_priv(dev);
2243 	vi->dev = dev;
2244 	vi->vdev = vdev;
2245 	vdev->priv = vi;
2246 	vi->stats = alloc_percpu(struct virtnet_stats);
2247 	err = -ENOMEM;
2248 	if (vi->stats == NULL)
2249 		goto free;
2250 
2251 	for_each_possible_cpu(i) {
2252 		struct virtnet_stats *virtnet_stats;
2253 		virtnet_stats = per_cpu_ptr(vi->stats, i);
2254 		u64_stats_init(&virtnet_stats->tx_syncp);
2255 		u64_stats_init(&virtnet_stats->rx_syncp);
2256 	}
2257 
2258 	INIT_WORK(&vi->config_work, virtnet_config_changed_work);
2259 
2260 	/* If we can receive ANY GSO packets, we must allocate large ones. */
2261 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
2262 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
2263 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
2264 	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
2265 		vi->big_packets = true;
2266 
2267 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
2268 		vi->mergeable_rx_bufs = true;
2269 
2270 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF) ||
2271 	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
2272 		vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
2273 	else
2274 		vi->hdr_len = sizeof(struct virtio_net_hdr);
2275 
2276 	if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
2277 	    virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
2278 		vi->any_header_sg = true;
2279 
2280 	if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
2281 		vi->has_cvq = true;
2282 
2283 	if (virtio_has_feature(vdev, VIRTIO_NET_F_MTU)) {
2284 		mtu = virtio_cread16(vdev,
2285 				     offsetof(struct virtio_net_config,
2286 					      mtu));
2287 		if (mtu < dev->min_mtu) {
2288 			__virtio_clear_bit(vdev, VIRTIO_NET_F_MTU);
2289 		} else {
2290 			dev->mtu = mtu;
2291 			dev->max_mtu = mtu;
2292 		}
2293 	}
2294 
2295 	if (vi->any_header_sg)
2296 		dev->needed_headroom = vi->hdr_len;
2297 
2298 	/* Enable multiqueue by default */
2299 	if (num_online_cpus() >= max_queue_pairs)
2300 		vi->curr_queue_pairs = max_queue_pairs;
2301 	else
2302 		vi->curr_queue_pairs = num_online_cpus();
2303 	vi->max_queue_pairs = max_queue_pairs;
2304 
2305 	/* Allocate/initialize the rx/tx queues, and invoke find_vqs */
2306 	err = init_vqs(vi);
2307 	if (err)
2308 		goto free_stats;
2309 
2310 #ifdef CONFIG_SYSFS
2311 	if (vi->mergeable_rx_bufs)
2312 		dev->sysfs_rx_queue_group = &virtio_net_mrg_rx_group;
2313 #endif
2314 	netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
2315 	netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
2316 
2317 	virtnet_init_settings(dev);
2318 
2319 	err = register_netdev(dev);
2320 	if (err) {
2321 		pr_debug("virtio_net: registering device failed\n");
2322 		goto free_vqs;
2323 	}
2324 
2325 	virtio_device_ready(vdev);
2326 
2327 	err = virtnet_cpu_notif_add(vi);
2328 	if (err) {
2329 		pr_debug("virtio_net: registering cpu notifier failed\n");
2330 		goto free_unregister_netdev;
2331 	}
2332 
2333 	rtnl_lock();
2334 	virtnet_set_queues(vi, vi->curr_queue_pairs);
2335 	rtnl_unlock();
2336 
2337 	/* Assume link up if device can't report link status,
2338 	 * otherwise get link status from config. */
2339 	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
2340 		netif_carrier_off(dev);
2341 		schedule_work(&vi->config_work);
2342 	} else {
2343 		vi->status = VIRTIO_NET_S_LINK_UP;
2344 		netif_carrier_on(dev);
2345 	}
2346 
2347 	pr_debug("virtnet: registered device %s with %d RX and TX vqs\n",
2348 		 dev->name, max_queue_pairs);
2349 
2350 	return 0;
2351 
2352 free_unregister_netdev:
2353 	vi->vdev->config->reset(vdev);
2354 
2355 	unregister_netdev(dev);
2356 free_vqs:
2357 	cancel_delayed_work_sync(&vi->refill);
2358 	free_receive_page_frags(vi);
2359 	virtnet_del_vqs(vi);
2360 free_stats:
2361 	free_percpu(vi->stats);
2362 free:
2363 	free_netdev(dev);
2364 	return err;
2365 }
2366 
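/* Reset the device and release all buffers and virtqueues; shared by the
 * remove and freeze paths.
 */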
2367 static void remove_vq_common(struct virtnet_info *vi)
2368 {
2369 	vi->vdev->config->reset(vi->vdev);
2370 
2371 	/* Free unused buffers in both send and recv, if any. */
2372 	free_unused_bufs(vi);
2373 
2374 	free_receive_bufs(vi);
2375 
2376 	free_receive_page_frags(vi);
2377 
2378 	virtnet_del_vqs(vi);
2379 }
2380 
2381 static void virtnet_remove(struct virtio_device *vdev)
2382 {
2383 	struct virtnet_info *vi = vdev->priv;
2384 
2385 	virtnet_cpu_notif_remove(vi);
2386 
2387 	/* Make sure no work handler is accessing the device. */
2388 	flush_work(&vi->config_work);
2389 
2390 	unregister_netdev(vi->dev);
2391 
2392 	remove_vq_common(vi);
2393 
2394 	free_percpu(vi->stats);
2395 	free_netdev(vi->dev);
2396 }
2397 
2398 #ifdef CONFIG_PM_SLEEP
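/* Quiesce for suspend: detach the netdev, stop NAPI and tear down the
 * virtqueues so virtnet_restore() can rebuild them.
 */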
2399 static int virtnet_freeze(struct virtio_device *vdev)
2400 {
2401 	struct virtnet_info *vi = vdev->priv;
2402 	int i;
2403 
2404 	virtnet_cpu_notif_remove(vi);
2405 
2406 	/* Make sure no work handler is accessing the device */
2407 	flush_work(&vi->config_work);
2408 
2409 	netif_device_detach(vi->dev);
2410 	cancel_delayed_work_sync(&vi->refill);
2411 
2412 	if (netif_running(vi->dev)) {
2413 		for (i = 0; i < vi->max_queue_pairs; i++)
2414 			napi_disable(&vi->rq[i].napi);
2415 	}
2416 
2417 	remove_vq_common(vi);
2418 
2419 	return 0;
2420 }
2421 
2422 static int virtnet_restore(struct virtio_device *vdev)
2423 {
2424 	struct virtnet_info *vi = vdev->priv;
2425 	int err, i;
2426 
2427 	err = init_vqs(vi);
2428 	if (err)
2429 		return err;
2430 
2431 	virtio_device_ready(vdev);
2432 
2433 	if (netif_running(vi->dev)) {
2434 		for (i = 0; i < vi->curr_queue_pairs; i++)
2435 			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2436 				schedule_delayed_work(&vi->refill, 0);
2437 
2438 		for (i = 0; i < vi->max_queue_pairs; i++)
2439 			virtnet_napi_enable(&vi->rq[i]);
2440 	}
2441 
2442 	netif_device_attach(vi->dev);
2443 
2444 	rtnl_lock();
2445 	virtnet_set_queues(vi, vi->curr_queue_pairs);
2446 	rtnl_unlock();
2447 
2448 	err = virtnet_cpu_notif_add(vi);
2449 	if (err)
2450 		return err;
2451 
2452 	return 0;
2453 }
2454 #endif
2455 
2456 static struct virtio_device_id id_table[] = {
2457 	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
2458 	{ 0 },
2459 };
2460 
2461 #define VIRTNET_FEATURES \
2462 	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \
2463 	VIRTIO_NET_F_MAC, \
2464 	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \
2465 	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \
2466 	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \
2467 	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \
2468 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \
2469 	VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
2470 	VIRTIO_NET_F_CTRL_MAC_ADDR, \
2471 	VIRTIO_NET_F_MTU
2472 
2473 static unsigned int features[] = {
2474 	VIRTNET_FEATURES,
2475 };
2476 
2477 static unsigned int features_legacy[] = {
2478 	VIRTNET_FEATURES,
2479 	VIRTIO_NET_F_GSO,
2480 	VIRTIO_F_ANY_LAYOUT,
2481 };
2482 
2483 static struct virtio_driver virtio_net_driver = {
2484 	.feature_table = features,
2485 	.feature_table_size = ARRAY_SIZE(features),
2486 	.feature_table_legacy = features_legacy,
2487 	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
2488 	.driver.name =	KBUILD_MODNAME,
2489 	.driver.owner =	THIS_MODULE,
2490 	.id_table =	id_table,
2491 	.probe =	virtnet_probe,
2492 	.remove =	virtnet_remove,
2493 	.config_changed = virtnet_config_changed,
2494 #ifdef CONFIG_PM_SLEEP
2495 	.freeze =	virtnet_freeze,
2496 	.restore =	virtnet_restore,
2497 #endif
2498 };
2499 
2500 static __init int virtio_net_driver_init(void)
2501 {
2502 	int ret;
2503 
2504 	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
2505 				      virtnet_cpu_online,
2506 				      virtnet_cpu_down_prep);
2507 	if (ret < 0)
2508 		goto out;
2509 	virtionet_online = ret;
2510 	ret = cpuhp_setup_state_multi(CPUHP_VIRT_NET_DEAD, "virtio/net:dead",
2511 				      NULL, virtnet_cpu_dead);
2512 	if (ret)
2513 		goto err_dead;
2514 
2515 	ret = register_virtio_driver(&virtio_net_driver);
2516 	if (ret)
2517 		goto err_virtio;
2518 	return 0;
2519 err_virtio:
2520 	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
2521 err_dead:
2522 	cpuhp_remove_multi_state(virtionet_online);
2523 out:
2524 	return ret;
2525 }
2526 module_init(virtio_net_driver_init);
2527 
2528 static __exit void virtio_net_driver_exit(void)
2529 {
2530 	cpuhp_remove_multi_state(CPUHP_VIRT_NET_DEAD);
2531 	cpuhp_remove_multi_state(virtionet_online);
2532 	unregister_virtio_driver(&virtio_net_driver);
2533 }
2534 module_exit(virtio_net_driver_exit);
2535 
2536 MODULE_DEVICE_TABLE(virtio, id_table);
2537 MODULE_DESCRIPTION("Virtio network driver");
2538 MODULE_LICENSE("GPL");
2539