xref: /linux/drivers/net/virtio_net.c (revision a115bc070b1fc57ab23f3972401425927b5b465c)
/* A network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>

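/* NAPI weight: an upper bound on packets processed per poll. */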
static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_SEND_COMMAND_SG_MAX    2

struct virtnet_info {
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq, *cvq;
	struct net_device *dev;
	struct napi_struct napi;
	unsigned int status;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Chain pages by the private ptr. */
	struct page *pages;
};

struct skb_vnet_hdr {
	union {
		struct virtio_net_hdr hdr;
		struct virtio_net_hdr_mrg_rxbuf mhdr;
	};
	unsigned int num_sg;
};

struct padded_vnet_hdr {
	struct virtio_net_hdr hdr;
	/*
	 * The virtio_net_hdr must be in a separate sg buffer because of a
	 * QEMU bug, and the data sg buffer shares a page with this header sg.
	 * This padding makes the next sg 16 byte aligned after virtio_net_hdr.
	 */
	char padding[6];
};

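/* The virtio header is stashed in the skb control buffer (skb->cb),
 * which is large enough for either header variant plus num_sg. */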
static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct skb_vnet_hdr *)skb->cb;
}

/*
 * private is used to chain pages for big packets; put the whole
 * most recently used list at the beginning for reuse.
 */
static void give_pages(struct virtnet_info *vi, struct page *page)
{
	struct page *end;

	/* Find end of list, sew whole thing into vi->pages. */
	for (end = page; end->private; end = (struct page *)end->private);
	end->private = (unsigned long)vi->pages;
	vi->pages = page;
}

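/* Take a page off the reuse chain if one is available, otherwise fall
 * back to a fresh allocation. */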
static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
	struct page *p = vi->pages;

	if (p) {
		vi->pages = (struct page *)p->private;
		/* clear private here, it is used to chain pages */
		p->private = 0;
	} else
		p = alloc_page(gfp_mask);
	return p;
}

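/* Callback for the send virtqueue: the host has consumed buffers, so
 * reclaimable entries exist and the queue can be restarted. */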
static void skb_xmit_done(struct virtqueue *svq)
{
	struct virtnet_info *vi = svq->vdev->priv;

	/* Suppress further interrupts. */
	svq->vq_ops->disable_cb(svq);

	/* We were probably waiting for more output buffers. */
	netif_wake_queue(vi->dev);
}

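/* Append (part of) a page to the skb's frag array, consuming at most a
 * page worth of *len. */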
static void set_skb_frag(struct sk_buff *skb, struct page *page,
			 unsigned int offset, unsigned int *len)
{
	int i = skb_shinfo(skb)->nr_frags;
	skb_frag_t *f;

	f = &skb_shinfo(skb)->frags[i];
	f->size = min((unsigned)PAGE_SIZE - offset, *len);
	f->page_offset = offset;
	f->page = page;

	skb->data_len += f->size;
	skb->len += f->size;
	skb_shinfo(skb)->nr_frags++;
	*len -= f->size;
}

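/* Build an skb from a chain of pages: copy what fits in the linear
 * area (up to GOOD_COPY_LEN of allocated tailroom), attach the rest as
 * page fragments, and return unused trailing pages to the pool. */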
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
				   struct page *page, unsigned int len)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	unsigned int copy, hdr_len, offset;
	char *p;

	p = page_address(page);

	/* copy small packet so we can reuse these pages for small data */
	skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
	if (unlikely(!skb))
		return NULL;

	hdr = skb_vnet_hdr(skb);

	if (vi->mergeable_rx_bufs) {
		hdr_len = sizeof hdr->mhdr;
		offset = hdr_len;
	} else {
		hdr_len = sizeof hdr->hdr;
		offset = sizeof(struct padded_vnet_hdr);
	}

	memcpy(hdr, p, hdr_len);

	len -= hdr_len;
	p += offset;

	copy = len;
	if (copy > skb_tailroom(skb))
		copy = skb_tailroom(skb);
	memcpy(skb_put(skb, copy), p, copy);

	len -= copy;
	offset += copy;

	while (len) {
		set_skb_frag(skb, page, offset, &len);
		page = (struct page *)page->private;
		offset = 0;
	}

	if (page)
		give_pages(vi, page);

	return skb;
}

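/* With mergeable rx buffers, the host reports how many descriptors one
 * packet spans; fetch the remaining pages and attach them as frags. */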
static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
{
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	struct page *page;
	unsigned int len;
	int num_buf, i;

	num_buf = hdr->mhdr.num_buffers;
	while (--num_buf) {
		i = skb_shinfo(skb)->nr_frags;
		if (i >= MAX_SKB_FRAGS) {
			pr_debug("%s: packet too long\n", skb->dev->name);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}

		page = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
		if (!page) {
			pr_debug("%s: rx error: %d buffers missing\n",
				 skb->dev->name, hdr->mhdr.num_buffers);
			skb->dev->stats.rx_length_errors++;
			return -EINVAL;
		}
		if (len > PAGE_SIZE)
			len = PAGE_SIZE;

		set_skb_frag(skb, page, 0, &len);

		--vi->num;
	}
	return 0;
}

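/* Handle one completed receive buffer: reconstruct the skb, apply
 * checksum and GSO metadata from the virtio header, and hand the
 * packet to the stack. */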
static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	struct skb_vnet_hdr *hdr;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(vi, buf);
		else
			dev_kfree_skb(buf);
		return;
	}

	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
		skb = buf;
		len -= sizeof(struct virtio_net_hdr);
		skb_trim(skb, len);
	} else {
		page = buf;
		skb = page_to_skb(vi, page, len);
		if (unlikely(!skb)) {
			dev->stats.rx_dropped++;
			give_pages(vi, page);
			return;
		}
		if (vi->mergeable_rx_bufs && receive_mergeable(vi, skb)) {
			dev_kfree_skb(skb);
			return;
		}
	}

	hdr = skb_vnet_hdr(skb);
	skb->truesize += skb->data_len;
	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb,
					  hdr->hdr.csum_start,
					  hdr->hdr.csum_offset))
			goto frame_err;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			if (net_ratelimit())
				printk(KERN_WARNING "%s: bad gso type %u.\n",
				       dev->name, hdr->hdr.gso_type);
			goto frame_err;
		}

		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			if (net_ratelimit())
				printk(KERN_WARNING "%s: zero gso size.\n",
				       dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
	dev_kfree_skb(skb);
}

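/* Post a single skb-backed receive buffer: sg[0] holds the virtio
 * header, sg[1] the packet data.  Note that gfp appears unused here;
 * netdev_alloc_skb_ip_align() chooses its own allocation flags. */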
static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
	struct sk_buff *skb;
	struct skb_vnet_hdr *hdr;
	struct scatterlist sg[2];
	int err;

	skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
	if (unlikely(!skb))
		return -ENOMEM;

	skb_put(skb, MAX_PACKET_LEN);

	hdr = skb_vnet_hdr(skb);
	sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);

	skb_to_sgvec(skb, sg + 1, 0, skb->len);

	err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb);
	if (err < 0)
		dev_kfree_skb(skb);

	return err;
}

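/* Post a page-chained receive buffer big enough for a maximal GSO
 * packet: one page shared by the header and the start of the data,
 * plus MAX_SKB_FRAGS further data pages, chained via page->private so
 * they can be reclaimed together. */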
static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 2];
	struct page *first, *list = NULL;
	char *p;
	int i, err, offset;

	/* page in sg[MAX_SKB_FRAGS + 1] is list tail */
	for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
		first = get_a_page(vi, gfp);
		if (!first) {
			if (list)
				give_pages(vi, list);
			return -ENOMEM;
		}
		sg_set_buf(&sg[i], page_address(first), PAGE_SIZE);

		/* chain new page in list head to match sg */
		first->private = (unsigned long)list;
		list = first;
	}

	first = get_a_page(vi, gfp);
	if (!first) {
		give_pages(vi, list);
		return -ENOMEM;
	}
	p = page_address(first);

	/* sg[0], sg[1] share the same page */
	/* a separate sg[0] for the virtio_net_hdr only, due to a QEMU bug */
	sg_set_buf(&sg[0], p, sizeof(struct virtio_net_hdr));

	/* sg[1] for the data packet, starting at offset */
	offset = sizeof(struct padded_vnet_hdr);
	sg_set_buf(&sg[1], p + offset, PAGE_SIZE - offset);

	/* chain first in list head */
	first->private = (unsigned long)list;
	err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2,
				       first);
	if (err < 0)
		give_pages(vi, first);

	return err;
}

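/* Post a single page; with mergeable rx buffers the host spreads a
 * large packet across however many of these it needs. */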
static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
	struct page *page;
	struct scatterlist sg;
	int err;

	page = get_a_page(vi, gfp);
	if (!page)
		return -ENOMEM;

	sg_init_one(&sg, page_address(page), PAGE_SIZE);

	err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page);
	if (err < 0)
		give_pages(vi, page);

	return err;
}

/* Returns false if we couldn't fill entirely (OOM). */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
	int err;
	bool oom = false;

	do {
		if (vi->mergeable_rx_bufs)
			err = add_recvbuf_mergeable(vi, gfp);
		else if (vi->big_packets)
			err = add_recvbuf_big(vi, gfp);
		else
			err = add_recvbuf_small(vi, gfp);

		if (err < 0) {
			oom = true;
			break;
		}
		++vi->num;
	} while (err > 0);
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	vi->rvq->vq_ops->kick(vi->rvq);
	return !oom;
}

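/* Callback for the receive virtqueue: kick off NAPI processing. */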
static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	/* Schedule NAPI; suppress further interrupts if successful. */
	if (napi_schedule_prep(&vi->napi)) {
		rvq->vq_ops->disable_cb(rvq);
		__napi_schedule(&vi->napi);
	}
}

static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi;
	bool still_empty;

	vi = container_of(work, struct virtnet_info, refill.work);
	napi_disable(&vi->napi);
	still_empty = !try_fill_recv(vi, GFP_KERNEL);
	napi_enable(&vi->napi);

	/* In theory, this can happen: if we don't get any buffers in,
	 * we will *never* try to fill again. */
	if (still_empty)
		schedule_delayed_work(&vi->refill, HZ/2);
}

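/* NAPI poll: drain completed receive buffers, refill the ring once it
 * falls below half of its historical maximum, and re-enable callbacks
 * when the budget is no longer exhausted. */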
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	void *buf;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
		receive_buf(vi->dev, buf, len);
		--vi->num;
		received++;
	}

	if (vi->num < vi->max / 2) {
		if (!try_fill_recv(vi, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		napi_complete(napi);
		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) &&
		    napi_schedule_prep(napi)) {
			vi->rvq->vq_ops->disable_cb(vi->rvq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

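/* Reclaim transmitted skbs; returns the number of descriptor slots
 * freed, as recorded in num_sg at transmit time. */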
static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len, tot_sgs = 0;

	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);
		vi->dev->stats.tx_bytes += skb->len;
		vi->dev->stats.tx_packets++;
		tot_sgs += skb_vnet_hdr(skb)->num_sg;
		dev_kfree_skb_any(skb);
	}
	return tot_sgs;
}

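/* Translate skb metadata (checksum offload, GSO) into a virtio header
 * and post header plus data as one scatterlist to the send queue. */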
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
	struct scatterlist sg[2+MAX_SKB_FRAGS];
	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	sg_init_table(sg, 2+MAX_SKB_FRAGS);

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
		hdr->hdr.csum_offset = skb->csum_offset;
	} else {
		hdr->hdr.flags = 0;
		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr.hdr_len = skb_headlen(skb);
		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
	}

	hdr->mhdr.num_buffers = 0;

	/* Encode metadata header at front. */
	if (vi->mergeable_rx_bufs)
		sg_set_buf(sg, &hdr->mhdr, sizeof hdr->mhdr);
	else
		sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);

	hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
	return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
}

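/* ndo_start_xmit: reclaim old buffers, queue this skb, and stop the
 * queue early when fewer than 2 + MAX_SKB_FRAGS slots remain, since
 * the next packet might not fit. */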
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int capacity;

again:
	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);

	/* Try to transmit */
	capacity = xmit_skb(vi, skb);

	/* This can happen with OOM and indirect buffers. */
	if (unlikely(capacity < 0)) {
		netif_stop_queue(dev);
		dev_warn(&dev->dev, "Unexpected full queue\n");
		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
			vi->svq->vq_ops->disable_cb(vi->svq);
			netif_start_queue(dev);
			goto again;
		}
		return NETDEV_TX_BUSY;
	}
	vi->svq->vq_ops->kick(vi->svq);

	/* Don't wait up for transmitted skbs to be freed. */
	skb_orphan(skb);
	nf_reset(skb);

	/* Apparently nice girls don't return TX_BUSY; stop the queue
	 * before it gets out of hand.  Naturally, this wastes entries. */
	if (capacity < 2+MAX_SKB_FRAGS) {
		netif_stop_queue(dev);
		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
			/* More just got used, free them then recheck. */
			capacity += free_old_xmit_skbs(vi);
			if (capacity >= 2+MAX_SKB_FRAGS) {
				netif_start_queue(dev);
				vi->svq->vq_ops->disable_cb(vi->svq);
			}
		}
	}

	return NETDEV_TX_OK;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_enable(&vi->napi);

	/* If all buffers were filled by the other side before we napi_enabled,
	 * we won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED. */
	if (napi_schedule_prep(&vi->napi)) {
		vi->rvq->vq_ops->disable_cb(vi->rvq);
		__napi_schedule(&vi->napi);
	}
	return 0;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *data, int out, int in)
{
	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned int tmp;
	int i;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
		(out + in > VIRTNET_SEND_COMMAND_SG_MAX));

	out++; /* Add header */
	in++; /* Add return status */

	ctrl.class = class;
	ctrl.cmd = cmd;

	sg_init_table(sg, out + in);

	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
	for_each_sg(data, s, out + in - 2, i)
		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

	BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);

	vi->cvq->vq_ops->kick(vi->cvq);

	/*
	 * Spin for a response; the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_disable(&vi->napi);

	return 0;
}

static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
		return -ENOSYS;

	return ethtool_op_set_tx_hw_csum(dev, data);
}

static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct dev_addr_list *addr;
	struct netdev_hw_addr *ha;
	int uc_count;
	int mc_count;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");

	uc_count = netdev_uc_count(dev);
	mc_count = netdev_mc_count(dev);
	/* MAC filter - use one buffer for both lists */
	buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
		      (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	if (!buf) {
		dev_warn(&dev->dev, "No memory for MAC address buffer\n");
		return;
	}
	mac_data = buf;

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = uc_count;
	i = 0;
	netdev_for_each_uc_addr(ha, dev)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (uc_count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[uc_count][0];

	mac_data->entries = mc_count;
	i = 0;
	netdev_for_each_mc_addr(addr, dev)
		memcpy(&mac_data->macs[i++][0], addr->da_addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
				  sg, 2, 0))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}

static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
}

static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.set_tx_csum = virtnet_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.set_tso = ethtool_op_set_tso,
	.set_ufo = ethtool_op_set_ufo,
	.get_link = ethtool_op_get_link,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop            = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_change_mtu      = virtnet_change_mtu,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};

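/* Re-read the link state from config space and propagate it to the
 * network core (carrier and queue state). */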
static void virtnet_update_status(struct virtnet_info *vi)
{
	u16 v;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
		return;

	vi->vdev->config->get(vi->vdev,
			      offsetof(struct virtio_net_config, status),
			      &v, sizeof(v));

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		return;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_wake_queue(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_stop_queue(vi->dev);
	}
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_update_status(vi);
}

static int virtnet_probe(struct virtio_device *vdev)
{
	int err;
	struct net_device *dev;
	struct virtnet_info *vi;
	struct virtqueue *vqs[3];
	vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL };
	const char *names[] = { "input", "output", "control" };
	int nvqs;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev(sizeof(struct virtnet_info));
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;
	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->features |= NETIF_F_TSO;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->features |= NETIF_F_TSO6;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->features |= NETIF_F_TSO_ECN;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->features |= NETIF_F_UFO;
	}

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		vdev->config->get(vdev,
				  offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);
	} else {
		random_ether_addr(dev->dev_addr);
	}

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->pages = NULL;
	INIT_DELAYED_WORK(&vi->refill, refill_work);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
	    virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	/* We expect two virtqueues, receive then send,
	 * and optionally control. */
	nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;

	err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
	if (err)
		goto free;

	vi->rvq = vqs[0];
	vi->svq = vqs[1];

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
		vi->cvq = vqs[2];

		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			dev->features |= NETIF_F_HW_VLAN_FILTER;
	}

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	/* Last of all, set up some receive buffers. */
	try_fill_recv(vi, GFP_KERNEL);

	/* If we didn't even get one input buffer, we're useless. */
	if (vi->num == 0) {
		err = -ENOMEM;
		goto unregister;
	}

	vi->status = VIRTIO_NET_S_LINK_UP;
	virtnet_update_status(vi);
	netif_carrier_on(dev);

	pr_debug("virtnet: registered device %s\n", dev->name);
	return 0;

unregister:
	unregister_netdev(dev);
	cancel_delayed_work_sync(&vi->refill);
free_vqs:
	vdev->config->del_vqs(vdev);
free:
	free_netdev(dev);
	return err;
}

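/* Detach and free any buffers still sitting in the virtqueues; in this
 * driver it runs after the device reset, so the host no longer touches
 * them. */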
static void free_unused_bufs(struct virtnet_info *vi)
{
	void *buf;
	while (1) {
		buf = vi->svq->vq_ops->detach_unused_buf(vi->svq);
		if (!buf)
			break;
		dev_kfree_skb(buf);
	}
	while (1) {
		buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq);
		if (!buf)
			break;
		if (vi->mergeable_rx_bufs || vi->big_packets)
			give_pages(vi, buf);
		else
			dev_kfree_skb(buf);
		--vi->num;
	}
	BUG_ON(vi->num != 0);
}

static void __devexit virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	unregister_netdev(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	/* Free unused buffers in both send and recv, if any. */
	free_unused_bufs(vi);

	vdev->config->del_vqs(vi->vdev);

	while (vi->pages)
		__free_pages(get_a_page(vi, GFP_KERNEL), 0);

	free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
};

static struct virtio_driver virtio_net_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name =	KBUILD_MODNAME,
	.driver.owner =	THIS_MODULE,
	.id_table =	id_table,
	.probe =	virtnet_probe,
	.remove =	__devexit_p(virtnet_remove),
	.config_changed = virtnet_config_changed,
};

static int __init init(void)
{
	return register_virtio_driver(&virtio_net_driver);
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_net_driver);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");