/* A simple network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static int csum = 1, gso = 1;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

struct virtnet_info
{
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq;
	struct net_device *dev;
	struct napi_struct napi;

	/* The skb we couldn't send because buffers were full. */
	struct sk_buff *last_xmit_skb;

	/* If we need to free in a timer, this is it. */
	struct timer_list xmit_free_timer;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* For cleaning up after transmission. */
	struct tasklet_struct tasklet;
	bool free_in_tasklet;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Receive & send queues. */
	struct sk_buff_head recv;
	struct sk_buff_head send;

	/* Chain pages by the private ptr. */
	struct page *pages;
};

static inline void *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr *)skb->cb;
}

static void give_a_page(struct virtnet_info *vi, struct page *page)
{
	page->private = (unsigned long)vi->pages;
	vi->pages = page;
}

static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
{
	unsigned int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		give_a_page(vi, skb_shinfo(skb)->frags[i].page);
	skb_shinfo(skb)->nr_frags = 0;
	skb->data_len = 0;
}

static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
	struct page *p = vi->pages;

	if (p)
		vi->pages = (struct page *)p->private;
	else
		p = alloc_page(gfp_mask);
	return p;
}

static void skb_xmit_done(struct virtqueue *svq)
{
	struct virtnet_info *vi = svq->vdev->priv;

	/* Suppress further interrupts. */
	svq->vq_ops->disable_cb(svq);

	/* We were probably waiting for more output buffers. */
	netif_wake_queue(vi->dev);

	/* Make sure we re-xmit last_xmit_skb: if there are no more packets
	 * queued, start_xmit won't be called. */
	tasklet_schedule(&vi->tasklet);
}
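
/* Receive path.  receive_skb() is called from NAPI poll for each used
 * receive buffer.  With VIRTIO_NET_F_MRG_RXBUF the header and data arrive in
 * page-sized fragments that may span several buffers and are stitched into
 * one skb; otherwise the pre-posted skb (plus big-packet page frags) only
 * needs trimming to the received length. */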

static void receive_skb(struct net_device *dev, struct sk_buff *skb,
			unsigned len)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
	int err;
	int i;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		goto drop;
	}

	if (vi->mergeable_rx_bufs) {
		struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
		unsigned int copy;
		char *p = page_address(skb_shinfo(skb)->frags[0].page);

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;
		len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);

		memcpy(hdr, p, sizeof(*mhdr));
		p += sizeof(*mhdr);

		copy = len;
		if (copy > skb_tailroom(skb))
			copy = skb_tailroom(skb);

		memcpy(skb_put(skb, copy), p, copy);

		len -= copy;

		if (!len) {
			give_a_page(vi, skb_shinfo(skb)->frags[0].page);
			skb_shinfo(skb)->nr_frags--;
		} else {
			skb_shinfo(skb)->frags[0].page_offset +=
				sizeof(*mhdr) + copy;
			skb_shinfo(skb)->frags[0].size = len;
			skb->data_len += len;
			skb->len += len;
		}

		while (--mhdr->num_buffers) {
			struct sk_buff *nskb;

			i = skb_shinfo(skb)->nr_frags;
			if (i >= MAX_SKB_FRAGS) {
				pr_debug("%s: packet too long %d\n", dev->name,
					 len);
				dev->stats.rx_length_errors++;
				goto drop;
			}

			nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
			if (!nskb) {
				pr_debug("%s: rx error: %d buffers missing\n",
					 dev->name, mhdr->num_buffers);
				dev->stats.rx_length_errors++;
				goto drop;
			}

			__skb_unlink(nskb, &vi->recv);
			vi->num--;

			skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
			skb_shinfo(nskb)->nr_frags = 0;
			kfree_skb(nskb);

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;

			skb_shinfo(skb)->frags[i].size = len;
			skb_shinfo(skb)->nr_frags++;
			skb->data_len += len;
			skb->len += len;
		}
	} else {
		len -= sizeof(struct virtio_net_hdr);

		if (len <= MAX_PACKET_LEN)
			trim_pages(vi, skb);

		err = pskb_trim(skb, len);
		if (err) {
			pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
				 len, err);
			dev->stats.rx_dropped++;
			goto drop;
		}
	}

	skb->truesize += skb->data_len;
	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb, hdr->csum_start, hdr->csum_offset))
			goto frame_err;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			if (net_ratelimit())
				printk(KERN_WARNING "%s: bad gso type %u.\n",
				       dev->name, hdr->gso_type);
			goto frame_err;
		}

		if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			if (net_ratelimit())
				printk(KERN_WARNING "%s: zero gso size.\n",
				       dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
drop:
	dev_kfree_skb(skb);
}
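
/* Receive buffer posting.  Without mergeable rx buffers each descriptor
 * chain is: the virtio_net_hdr kept in skb->cb, the skb's linear data, and
 * (for big packets) up to MAX_SKB_FRAGS page fragments.  With mergeable rx
 * buffers each chain is a single page; the host fills as many as it needs
 * per packet. */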

static void try_fill_recv_maxbufs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	struct scatterlist sg[2+MAX_SKB_FRAGS];
	int num, err, i;

	sg_init_table(sg, 2+MAX_SKB_FRAGS);
	for (;;) {
		struct virtio_net_hdr *hdr;

		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN);
		if (unlikely(!skb))
			break;

		skb_put(skb, MAX_PACKET_LEN);

		hdr = skb_vnet_hdr(skb);
		sg_set_buf(sg, hdr, sizeof(*hdr));

		if (vi->big_packets) {
			for (i = 0; i < MAX_SKB_FRAGS; i++) {
				skb_frag_t *f = &skb_shinfo(skb)->frags[i];
				f->page = get_a_page(vi, GFP_ATOMIC);
				if (!f->page)
					break;

				f->page_offset = 0;
				f->size = PAGE_SIZE;

				skb->data_len += PAGE_SIZE;
				skb->len += PAGE_SIZE;

				skb_shinfo(skb)->nr_frags++;
			}
		}

		num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
		skb_queue_head(&vi->recv, skb);

		err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
		if (err) {
			skb_unlink(skb, &vi->recv);
			trim_pages(vi, skb);
			kfree_skb(skb);
			break;
		}
		vi->num++;
	}
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	vi->rvq->vq_ops->kick(vi->rvq);
}

static void try_fill_recv(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	struct scatterlist sg[1];
	int err;

	if (!vi->mergeable_rx_bufs) {
		try_fill_recv_maxbufs(vi);
		return;
	}

	for (;;) {
		skb_frag_t *f;

		skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
		if (unlikely(!skb))
			break;

		skb_reserve(skb, NET_IP_ALIGN);

		f = &skb_shinfo(skb)->frags[0];
		f->page = get_a_page(vi, GFP_ATOMIC);
		if (!f->page) {
			kfree_skb(skb);
			break;
		}

		f->page_offset = 0;
		f->size = PAGE_SIZE;

		skb_shinfo(skb)->nr_frags++;

		sg_init_one(sg, page_address(f->page), PAGE_SIZE);
		skb_queue_head(&vi->recv, skb);

		err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
		if (err) {
			skb_unlink(skb, &vi->recv);
			kfree_skb(skb);
			break;
		}
		vi->num++;
	}
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	vi->rvq->vq_ops->kick(vi->rvq);
}

static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	/* Schedule NAPI; suppress further interrupts if successful. */
	if (netif_rx_schedule_prep(&vi->napi)) {
		rvq->vq_ops->disable_cb(rvq);
		__netif_rx_schedule(&vi->napi);
	}
}

static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	struct sk_buff *skb = NULL;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
		__skb_unlink(skb, &vi->recv);
		receive_skb(vi->dev, skb, len);
		vi->num--;
		received++;
	}

	/* FIXME: If we oom and completely run out of inbufs, we need
	 * to start a timer trying to fill more. */
	if (vi->num < vi->max / 2)
		try_fill_recv(vi);

	/* Out of packets? */
	if (received < budget) {
		netif_rx_complete(napi);
		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
		    && napi_schedule_prep(napi)) {
			vi->rvq->vq_ops->disable_cb(vi->rvq);
			__netif_rx_schedule(napi);
			goto again;
		}
	}

	return received;
}
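
/* Transmit path.  xmit_skb() builds the virtio_net_hdr (checksum and GSO
 * metadata) plus a scatterlist of the skb and posts it to the send queue;
 * start_xmit() keeps at most one deferred skb in last_xmit_skb when the
 * queue is full.  Completed skbs are reclaimed by free_old_xmit_skbs() from
 * start_xmit, from the xmit tasklet, or from a fallback timer when the
 * transport lacks VIRTIO_F_NOTIFY_ON_EMPTY. */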

static void free_old_xmit_skbs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len;

	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);
		__skb_unlink(skb, &vi->send);
		vi->dev->stats.tx_bytes += skb->len;
		vi->dev->stats.tx_packets++;
		kfree_skb(skb);
	}
}

/* If the virtio transport doesn't always notify us when all in-flight packets
 * are consumed, we fall back to using this function on a timer to free them. */
static void xmit_free(unsigned long data)
{
	struct virtnet_info *vi = (void *)data;

	netif_tx_lock(vi->dev);

	free_old_xmit_skbs(vi);

	if (!skb_queue_empty(&vi->send))
		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

	netif_tx_unlock(vi->dev);
}

static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
	int num, err;
	struct scatterlist sg[2+MAX_SKB_FRAGS];
	struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	sg_init_table(sg, 2+MAX_SKB_FRAGS);

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = skb->csum_start - skb_headroom(skb);
		hdr->csum_offset = skb->csum_offset;
	} else {
		hdr->flags = 0;
		hdr->csum_offset = hdr->csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr_len = skb_transport_header(skb) - skb->data;
		hdr->gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->gso_size = hdr->hdr_len = 0;
	}

	mhdr->num_buffers = 0;

	/* Encode metadata header at front. */
	if (vi->mergeable_rx_bufs)
		sg_set_buf(sg, mhdr, sizeof(*mhdr));
	else
		sg_set_buf(sg, hdr, sizeof(*hdr));

	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;

	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
	if (!err && !vi->free_in_tasklet)
		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

	return err;
}

static void xmit_tasklet(unsigned long data)
{
	struct virtnet_info *vi = (void *)data;

	netif_tx_lock_bh(vi->dev);
	if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) == 0) {
		vi->svq->vq_ops->kick(vi->svq);
		vi->last_xmit_skb = NULL;
	}
	if (vi->free_in_tasklet)
		free_old_xmit_skbs(vi);
	netif_tx_unlock_bh(vi->dev);
}

static int start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

again:
	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);

	/* If we have a buffer left over from last time, send it now. */
	if (unlikely(vi->last_xmit_skb) &&
	    xmit_skb(vi, vi->last_xmit_skb) != 0)
		goto stop_queue;

	vi->last_xmit_skb = NULL;

	/* Put new one in send queue and do transmit */
	if (likely(skb)) {
		__skb_queue_head(&vi->send, skb);
		if (xmit_skb(vi, skb) != 0) {
			vi->last_xmit_skb = skb;
			skb = NULL;
			goto stop_queue;
		}
	}
done:
	vi->svq->vq_ops->kick(vi->svq);
	return NETDEV_TX_OK;

stop_queue:
	pr_debug("%s: virtio not prepared to send\n", dev->name);
	netif_stop_queue(dev);

	/* Activate callback for using skbs: if this returns false it
	 * means some were used in the meantime. */
	if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
		vi->svq->vq_ops->disable_cb(vi->svq);
		netif_start_queue(dev);
		goto again;
	}
	if (skb) {
		/* Drop this skb: we only queue one. */
		vi->dev->stats.tx_dropped++;
		kfree_skb(skb);
	}
	goto done;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_enable(&vi->napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants to re-enable the queue, so we disable it
	 * here.  We synchronize against interrupts via NAPI_STATE_SCHED. */
	if (netif_rx_schedule_prep(&vi->napi)) {
		vi->rvq->vq_ops->disable_cb(vi->rvq);
		__netif_rx_schedule(&vi->napi);
	}
	return 0;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_disable(&vi->napi);

	return 0;
}

static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
		return -ENOSYS;

	return ethtool_op_set_tx_hw_csum(dev, data);
}

static struct ethtool_ops virtnet_ethtool_ops = {
	.set_tx_csum = virtnet_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.set_tso = ethtool_op_set_tso,
	.get_link = ethtool_op_get_link,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop            = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_change_mtu      = virtnet_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};
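
/* Device probe: allocate the net_device, advertise the offloads the host
 * supports, read the MAC from config space (or randomize it), set up the
 * receive and send virtqueues, then register the device and pre-fill the
 * receive ring. */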

static int virtnet_probe(struct virtio_device *vdev)
{
	int err;
	struct net_device *dev;
	struct virtnet_info *vi;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev(sizeof(struct virtnet_info));
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;
	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->features |= NETIF_F_TSO;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->features |= NETIF_F_TSO6;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->features |= NETIF_F_TSO_ECN;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->features |= NETIF_F_UFO;
	}

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		vdev->config->get(vdev,
				  offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);
	} else
		random_ether_addr(dev->dev_addr);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->pages = NULL;

	/* If they give us a callback when all buffers are done, we don't need
	 * the timer. */
	vi->free_in_tasklet = virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	/* We expect two virtqueues, receive then send. */
	vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
	if (IS_ERR(vi->rvq)) {
		err = PTR_ERR(vi->rvq);
		goto free;
	}

	vi->svq = vdev->config->find_vq(vdev, 1, skb_xmit_done);
	if (IS_ERR(vi->svq)) {
		err = PTR_ERR(vi->svq);
		goto free_recv;
	}

	/* Initialize our empty receive and send queues. */
	skb_queue_head_init(&vi->recv);
	skb_queue_head_init(&vi->send);

	tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);

	if (!vi->free_in_tasklet)
		setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_send;
	}

	/* Last of all, set up some receive buffers. */
	try_fill_recv(vi);

	/* If we didn't even get one input buffer, we're useless. */
	if (vi->num == 0) {
		err = -ENOMEM;
		goto unregister;
	}

	netif_carrier_on(dev);

	pr_debug("virtnet: registered device %s\n", dev->name);
	return 0;

unregister:
	unregister_netdev(dev);
free_send:
	vdev->config->del_vq(vi->svq);
free_recv:
	vdev->config->del_vq(vi->rvq);
free:
	free_netdev(dev);
	return err;
}
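
/* Device teardown.  Resetting the device first guarantees the host no longer
 * touches our buffers, so the queued skbs and chained pages can be reclaimed
 * before the virtqueues are deleted. */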

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	struct sk_buff *skb;

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	if (!vi->free_in_tasklet)
		del_timer_sync(&vi->xmit_free_timer);

	/* Free our skbs in send and recv queues, if any. */
	while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
		kfree_skb(skb);
		vi->num--;
	}
	__skb_queue_purge(&vi->send);

	BUG_ON(vi->num != 0);

	vdev->config->del_vq(vi->svq);
	vdev->config->del_vq(vi->rvq);
	unregister_netdev(vi->dev);

	while (vi->pages)
		__free_pages(get_a_page(vi, GFP_KERNEL), 0);

	free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */
	VIRTIO_NET_F_MRG_RXBUF,
	VIRTIO_F_NOTIFY_ON_EMPTY,
};

static struct virtio_driver virtio_net = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtnet_probe,
	.remove = __devexit_p(virtnet_remove),
};

static int __init init(void)
{
	return register_virtio_driver(&virtio_net);
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_net);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");