/*
 * Virtual network driver for conversing with remote driver backends.
 *
 * Copyright (c) 2002-2005, K A Fraser
 * Copyright (c) 2005, XenSource Ltd
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <net/ip.h>

#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/grant_table.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>

static struct ethtool_ops xennet_ethtool_ops;

struct netfront_cb {
	struct page *page;
	unsigned offset;
};

#define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))

#define RX_COPY_THRESHOLD 256

#define GRANT_INVALID_REF	0

#define NET_TX_RING_SIZE __RING_SIZE((struct xen_netif_tx_sring *)0, PAGE_SIZE)
#define NET_RX_RING_SIZE __RING_SIZE((struct xen_netif_rx_sring *)0, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)

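/*
 * Per-interface state.  tx_lock protects the transmit ring and the
 * tx_skbs/grant bookkeeping; rx_lock protects the receive ring, the
 * refill batch and the rx_skbs/grant tables.
 */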
struct netfront_info {
	struct list_head list;
	struct net_device *netdev;

	struct napi_struct napi;

	unsigned int evtchn;
	struct xenbus_device *xbdev;

	spinlock_t tx_lock;
	struct xen_netif_tx_front_ring tx;
	int tx_ring_ref;

	/*
	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
	 * are linked from tx_skb_freelist through skb_entry.link.
	 *
	 * NB. Freelist index entries are always going to be less than
	 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
	 * greater than PAGE_OFFSET: we use this property to distinguish
	 * them.
	 */
	union skb_entry {
		struct sk_buff *skb;
		unsigned long link;
	} tx_skbs[NET_TX_RING_SIZE];
	grant_ref_t gref_tx_head;
	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
	unsigned tx_skb_freelist;

	spinlock_t rx_lock ____cacheline_aligned_in_smp;
	struct xen_netif_rx_front_ring rx;
	int rx_ring_ref;

	/* Receive-ring batched refills. */
#define RX_MIN_TARGET 8
#define RX_DFL_MIN_TARGET 64
#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
	unsigned rx_min_target, rx_max_target, rx_target;
	struct sk_buff_head rx_batch;

	struct timer_list rx_refill_timer;

	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
	grant_ref_t gref_rx_head;
	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];

	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
};

struct netfront_rx_info {
	struct xen_netif_rx_response rx;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};

static void skb_entry_set_link(union skb_entry *list, unsigned short id)
{
	list->link = id;
}

static int skb_entry_is_link(const union skb_entry *list)
{
	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
	return ((unsigned long)list->skb < PAGE_OFFSET);
}

/*
 * Access macros for acquiring and freeing slots in tx_skbs[].
 */

static void add_id_to_freelist(unsigned *head, union skb_entry *list,
			       unsigned short id)
{
	skb_entry_set_link(&list[id], *head);
	*head = id;
}

static unsigned short get_id_from_freelist(unsigned *head,
					   union skb_entry *list)
{
	unsigned int id = *head;
	*head = list[id].link;
	return id;
}

static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}

static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
					 RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	struct sk_buff *skb = np->rx_skbs[i];
	np->rx_skbs[i] = NULL;
	return skb;
}

static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
				     RING_IDX ri)
{
	int i = xennet_rxidx(ri);
	grant_ref_t ref = np->grant_rx_ref[i];
	np->grant_rx_ref[i] = GRANT_INVALID_REF;
	return ref;
}

#ifdef CONFIG_SYSFS
static int xennet_sysfs_addif(struct net_device *netdev);
static void xennet_sysfs_delif(struct net_device *netdev);
#else /* !CONFIG_SYSFS */
#define xennet_sysfs_addif(dev) (0)
#define xennet_sysfs_delif(dev) do { } while (0)
#endif

static int xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}


static void rx_refill_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netfront_info *np = netdev_priv(dev);
	netif_rx_schedule(dev, &np->napi);
}

static int netfront_tx_slot_available(struct netfront_info *np)
{
	return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
}

static void xennet_maybe_wake_tx(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	if (unlikely(netif_queue_stopped(dev)) &&
	    netfront_tx_slot_available(np) &&
	    likely(netif_running(dev)))
		netif_wake_queue(dev);
}

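/*
 * Refill the receive ring: allocate page-backed skbs in a batch, grant
 * each page to the backend, and notify it if new requests were pushed.
 * If no skbs can be allocated at all, retry from the rx_refill_timer.
 */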
static void xennet_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	int i, batch_target, notify;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	grant_ref_t ref;
	unsigned long pfn;
	void *vaddr;
	struct xen_netif_rx_request *req;

	if (unlikely(!netif_carrier_ok(dev)))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation requests
	 * both for ourselves and for other kernel subsystems.
	 */
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
					 GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb))
			goto no_skb;

		/* Align ip header to a 16-byte boundary */
		skb_reserve(skb, NET_IP_ALIGN);

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page) {
			kfree_skb(skb);
no_skb:
			/* Any skbuffs queued for refill? Force them out. */
			if (i != 0)
				goto refill;
			/* Could not allocate any skbuffs. Try again later. */
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));
			break;
		}

		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->nr_frags = 1;
		__skb_queue_tail(&np->rx_batch, skb);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

refill:
	for (i = 0; ; i++) {
		skb = __skb_dequeue(&np->rx_batch);
		if (skb == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
		vaddr = page_address(skb_shinfo(skb)->frags[0].page);

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		gnttab_grant_foreign_access_ref(ref,
						np->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);

		req->id = id;
		req->gref = ref;
	}

	wmb();		/* barrier so backend sees requests */

	/* Above is a suitable barrier to ensure backend will see requests. */
	np->rx.req_prod_pvt = req_prod + i;
push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);
}

static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	napi_enable(&np->napi);

	spin_lock_bh(&np->rx_lock);
	if (netif_carrier_ok(dev)) {
		xennet_alloc_rx_buffers(dev);
		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			netif_rx_schedule(dev, &np->napi);
	}
	spin_unlock_bh(&np->rx_lock);

	netif_start_queue(dev);

	return 0;
}

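/*
 * Reclaim transmit buffers the backend has finished with: for each new
 * response, revoke the grant, return the grant reference and the tx_skbs
 * slot to their free lists, and release the skb.  Also re-arms the
 * response event and wakes the queue if slots became available.
 */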
static void xennet_tx_buf_gc(struct net_device *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(dev));

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&np->tx, cons);
			if (txrsp->status == NETIF_RSP_NULL)
				continue;

			id = txrsp->id;
			skb = np->tx_skbs[id].skb;
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				printk(KERN_ALERT "xennet_tx_buf_gc: warning "
				       "-- grant still in use by backend "
				       "domain.\n");
				BUG();
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb();		/* update shared area */
	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

	xennet_maybe_wake_tx(dev);
}

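/*
 * Queue additional transmit requests for the rest of an skb: any part of
 * the linear header that spills over a page boundary, followed by one
 * request per page fragment.  Each chunk gets its own tx_skbs slot, a
 * reference on the skb, and a read-only grant for the backend.
 */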
static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
			      struct xen_netif_tx_request *tx)
{
	struct netfront_info *np = netdev_priv(dev);
	char *data = skb->data;
	unsigned long mfn;
	RING_IDX prod = np->tx.req_prod_pvt;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned int id;
	grant_ref_t ref;
	int i;

	/* While the header overlaps a page boundary (including being
	   larger than a page), split it into page-sized chunks. */
	while (len > PAGE_SIZE - offset) {
		tx->size = PAGE_SIZE - offset;
		tx->flags |= NETTXF_more_data;
		len -= tx->size;
		data += tx->size;
		offset = 0;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = virt_to_mfn(data);
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = offset;
		tx->size = len;
		tx->flags = 0;
	}

	/* Grant backend access to each skb fragment page. */
	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		tx->flags |= NETTXF_more_data;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = pfn_to_mfn(page_to_pfn(frag->page));
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = frag->page_offset;
		tx->size = frag->size;
		tx->flags = 0;
	}

	np->tx.req_prod_pvt = prod;
}

static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct xen_netif_tx_request *tx;
	struct xen_netif_extra_info *extra;
	char *data = skb->data;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);

	frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
		       frags);
		dump_stack();
		goto drop;
	}

	spin_lock_irq(&np->tx_lock);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (frags > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(dev, skb))) {
		spin_unlock_irq(&np->tx_lock);
		goto drop;
	}

	i = np->tx.req_prod_pvt;

	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
	np->tx_skbs[id].skb = skb;

	tx = RING_GET_REQUEST(&np->tx, i);

	tx->id = id;
	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(data);
	gnttab_grant_foreign_access_ref(
		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
	tx->gref = np->grant_tx_ref[id] = ref;
	tx->offset = offset;
	tx->size = len;
	extra = NULL;

	tx->flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= NETTXF_data_validated;

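	/*
	 * A TSO skb needs a second, "extra info" request slot immediately
	 * after the first one to carry its GSO parameters; NETTXF_extra_info
	 * tells the backend to expect it.
	 */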
	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&np->tx, ++i);

		if (extra)
			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
		else
			tx->flags |= NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
		extra = gso;
	}

	np->tx.req_prod_pvt = i + 1;

	xennet_make_frags(skb, dev, tx);
	tx->size = skb->len;

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(dev);

	if (!netfront_tx_slot_available(np))
		netif_stop_queue(dev);

	spin_unlock_irq(&np->tx_lock);

	return 0;

drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return 0;
}

static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	netif_stop_queue(np->netdev);
	napi_disable(&np->napi);
	return 0;
}

static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	BUG_ON(np->rx_skbs[new]);
	np->rx_skbs[new] = skb;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}

static int xennet_get_extras(struct netfront_info *np,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)
{
	struct xen_netif_extra_info *extra;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&np->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					 extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		skb = xennet_get_rx_skb(np, cons);
		ref = xennet_get_rx_ref(np, cons);
		xennet_move_rx_slot(np, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	np->rx.rsp_cons = cons;
	return err;
}

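/*
 * Consume the chain of responses that make up one received packet (the
 * head plus any NETRXF_more_data fragments).  Each good slot has its
 * grant revoked and its skb queued on @list; bad slots are recycled back
 * onto the receive ring via xennet_move_rx_slot().
 */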
static int xennet_get_responses(struct netfront_info *np,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	struct xen_netif_extra_info *extras = rinfo->extras;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
	grant_ref_t ref = xennet_get_rx_ref(np, cons);
	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
	int frags = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp);
		cons = np->rx.rsp_cons;
	}

	for (;;) {
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %x, size: %u\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(np, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & NETRXF_more_data))
			break;

		if (cons + frags == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more frags\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
		skb = xennet_get_rx_skb(np, cons + frags);
		ref = xennet_get_rx_ref(np, cons + frags);
		frags++;
	}

	if (unlikely(frags > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many frags\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		np->rx.rsp_cons = cons + frags;

	return err;
}

static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			printk(KERN_WARNING "GSO size must not be zero.\n");
		return -EINVAL;
	}

	/* Currently only TCPv4 S.O. is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		if (net_ratelimit())
			printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}

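/*
 * Attach the pages of the extra skbs collected by xennet_get_responses()
 * as fragments of the head skb, then free the now-empty donor skbs.
 * Returns the ring index just past the consumed responses.
 */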
static RING_IDX xennet_fill_frags(struct netfront_info *np,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	RING_IDX cons = np->rx.rsp_cons;
	skb_frag_t *frag = shinfo->frags + nr_frags;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&np->rx, ++cons);

		frag->page = skb_shinfo(nskb)->frags[0].page;
		frag->page_offset = rx->offset;
		frag->size = rx->status;

		skb->data_len += rx->status;

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);

		frag++;
		nr_frags++;
	}

	shinfo->nr_frags = nr_frags;
	return cons;
}

static int skb_checksum_setup(struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	iph = (void *)skb->data;
	th = skb->data + 4 * iph->ihl;
	if (th >= skb_tail_pointer(skb))
		goto out;

	skb->csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		skb->csum_offset = offsetof(struct tcphdr, check);
		break;
	case IPPROTO_UDP:
		skb->csum_offset = offsetof(struct udphdr, check);
		break;
	default:
		if (net_ratelimit())
			printk(KERN_ERR "Attempting to checksum a non-"
			       "TCP/UDP packet, dropping a protocol"
			       " %d packet", iph->protocol);
		goto out;
	}

	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
		goto out;

	err = 0;

out:
	return err;
}

static int handle_incoming_queue(struct net_device *dev,
				 struct sk_buff_head *rxq)
{
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		struct page *page = NETFRONT_SKB_CB(skb)->page;
		void *vaddr = page_address(page);
		unsigned offset = NETFRONT_SKB_CB(skb)->offset;

		memcpy(skb->data, vaddr + offset,
		       skb_headlen(skb));

		if (page != skb_shinfo(skb)->frags[0].page)
			__free_page(page);

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, dev);

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			if (skb_checksum_setup(skb)) {
				kfree_skb(skb);
				packets_dropped++;
				dev->stats.rx_errors++;
				continue;
			}
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;

		/* Pass it up. */
		netif_receive_skb(skb);
	}

	return packets_dropped;
}

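/*
 * NAPI poll handler.  Drains up to @budget responses from the receive
 * ring, assembling each packet (head copy plus fragments) and passing it
 * up the stack, then refills the ring and re-enables interrupts once the
 * ring is empty.
 */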
static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
	struct net_device *dev = np->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	unsigned long flags;
	unsigned int len;
	int err;

	spin_lock(&np->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = np->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = np->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(np, &rinfo, rp, &tmpq);

		if (unlikely(err)) {
err:
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = np->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				np->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
		NETFRONT_SKB_CB(skb)->offset = rx->offset;

		len = rx->status;
		if (len > RX_COPY_THRESHOLD)
			len = RX_COPY_THRESHOLD;
		skb_put(skb, len);

		if (rx->status > len) {
			skb_shinfo(skb)->frags[0].page_offset =
				rx->offset + len;
			skb_shinfo(skb)->frags[0].size = rx->status - len;
			skb->data_len = rx->status - len;
		} else {
			skb_shinfo(skb)->frags[0].page = NULL;
			skb_shinfo(skb)->nr_frags = 0;
		}

		i = xennet_fill_frags(np, skb, &tmpq);

		/*
		 * Truesize approximates the size of true data plus
		 * any supervisor overheads. Adding hypervisor
		 * overheads has been shown to significantly reduce
		 * achievable bandwidth with the default receive
		 * buffer size. It is therefore not wise to account
		 * for it here.
		 *
		 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
		 * to RX_COPY_THRESHOLD + the supervisor
		 * overheads. Here, we add the size of the data pulled
		 * in xennet_fill_frags().
		 *
		 * We also adjust for any unused space in the main
		 * data area by subtracting (RX_COPY_THRESHOLD -
		 * len). This is especially important with drivers
		 * which split incoming packets into header and data,
		 * using only 66 bytes of the main data area (see the
		 * e1000 driver for example.) On such systems,
		 * without this last adjustment, our achievable
		 * receive throughput using the standard receive
		 * buffer size was cut by 25%(!!!).
		 */
		skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
		skb->len += skb->data_len;

		if (rx->flags & NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		np->rx.rsp_cons = ++i;
		work_done++;
	}

	__skb_queue_purge(&errq);

	work_done -= handle_incoming_queue(dev, &rxq);

	/* If we get a callback with very few responses, reduce fill target. */
	/* NB. Note exponential increase, linear decrease. */
	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
	     ((3*np->rx_target) / 4)) &&
	    (--np->rx_target < np->rx_min_target))
		np->rx_target = np->rx_min_target;

	xennet_alloc_rx_buffers(dev);

	if (work_done < budget) {
		int more_to_do = 0;

		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
		if (!more_to_do)
			__netif_rx_complete(dev, napi);

		local_irq_restore(flags);
	}

	spin_unlock(&np->rx_lock);

	return work_done;
}

static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

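/*
 * Tear-down helper: revoke every outstanding transmit grant, return the
 * grant references and tx_skbs slots to their free lists, and free the
 * skbs.  Used when the device is uninitialised or reconnected.
 */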
static void xennet_release_tx_bufs(struct netfront_info *np)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if (skb_entry_is_link(&np->tx_skbs[i]))
			continue;

		skb = np->tx_skbs[i].skb;
		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
					      GNTMAP_readonly);
		gnttab_release_grant_reference(&np->gref_tx_head,
					       np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
		dev_kfree_skb_irq(skb);
	}
}

static void xennet_release_rx_bufs(struct netfront_info *np)
{
	struct mmu_update *mmu = np->rx_mmu;
	struct multicall_entry *mcl = np->rx_mcl;
	struct sk_buff_head free_list;
	struct sk_buff *skb;
	unsigned long mfn;
	int xfer = 0, noxfer = 0, unused = 0;
	int id, ref;

	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
		 __func__);
	return;

	skb_queue_head_init(&free_list);

	spin_lock_bh(&np->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		ref = np->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF) {
			unused++;
			continue;
		}

		skb = np->rx_skbs[id];
		mfn = gnttab_end_foreign_transfer_ref(ref);
		gnttab_release_grant_reference(&np->gref_rx_head, ref);
		np->grant_rx_ref[id] = GRANT_INVALID_REF;

		if (0 == mfn) {
			skb_shinfo(skb)->nr_frags = 0;
			dev_kfree_skb(skb);
			noxfer++;
			continue;
		}

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Remap the page. */
			struct page *page = skb_shinfo(skb)->frags[0].page;
			unsigned long pfn = page_to_pfn(page);
			void *vaddr = page_address(page);

			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
						mfn_pte(mfn, PAGE_KERNEL),
						0);
			mcl++;
			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
				| MMU_MACHPHYS_UPDATE;
			mmu->val = pfn;
			mmu++;

			set_phys_to_machine(pfn, mfn);
		}
		__skb_queue_tail(&free_list, skb);
		xfer++;
	}

	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
		 __func__, xfer, noxfer, unused);

	if (xfer) {
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Do all the remapping work and M2P updates. */
			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
					 NULL, DOMID_SELF);
			mcl++;
			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
		}
	}

	__skb_queue_purge(&free_list);

	spin_unlock_bh(&np->rx_lock);
}

static void xennet_uninit(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	xennet_release_tx_bufs(np);
	xennet_release_rx_bufs(np);
	gnttab_free_grant_references(np->gref_tx_head);
	gnttab_free_grant_references(np->gref_rx_head);
}

static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
{
	int i, err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev(sizeof(struct netfront_info));
	if (!netdev) {
		printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
		       __func__);
		return ERR_PTR(-ENOMEM);
	}

	np = netdev_priv(netdev);
	np->xbdev = dev;

	spin_lock_init(&np->tx_lock);
	spin_lock_init(&np->rx_lock);

	skb_queue_head_init(&np->rx_batch);
	np->rx_target = RX_DFL_MIN_TARGET;
	np->rx_min_target = RX_DFL_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	init_timer(&np->rx_refill_timer);
	np->rx_refill_timer.data = (unsigned long)netdev;
	np->rx_refill_timer.function = rx_refill_timeout;

	/* Initialise tx_skbs as a free chain containing every entry. */
	np->tx_skb_freelist = 0;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		skb_entry_set_link(&np->tx_skbs[i], i+1);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
	}

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		np->rx_skbs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
					  &np->gref_tx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
					  &np->gref_rx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	netdev->open = xennet_open;
	netdev->hard_start_xmit = xennet_start_xmit;
	netdev->stop = xennet_close;
	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
	netdev->uninit = xennet_uninit;
	netdev->change_mtu = xennet_change_mtu;
	netdev->features = NETIF_F_IP_CSUM;

	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
	SET_NETDEV_DEV(netdev, &dev->dev);

	np->netdev = netdev;

	netif_carrier_off(netdev);

	return netdev;

exit_free_tx:
	gnttab_free_grant_references(np->gref_tx_head);
exit:
	free_netdev(netdev);
	return ERR_PTR(err);
}

/**
 * Entry point to this code when a new device is created. Allocate the basic
 * structures and the ring buffers for communication with the backend, and
 * inform the backend of the appropriate details for those.
 */
static int __devinit netfront_probe(struct xenbus_device *dev,
				    const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev->dev.driver_data = info;

	err = register_netdev(info->netdev);
	if (err) {
		printk(KERN_WARNING "%s: register_netdev err=%d\n",
		       __func__, err);
		goto fail;
	}

	err = xennet_sysfs_addif(info->netdev);
	if (err) {
		unregister_netdev(info->netdev);
		printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
		       __func__, err);
		goto fail;
	}

	return 0;

fail:
	free_netdev(netdev);
	dev->dev.driver_data = NULL;
	return err;
}

static void xennet_end_access(int ref, void *page)
{
	/* This frees the page as a side-effect */
	if (ref != GRANT_INVALID_REF)
		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}

static void xennet_disconnect_backend(struct netfront_info *info)
{
	/* Stop old i/f to prevent errors whilst we rebuild the state. */
	spin_lock_bh(&info->rx_lock);
	spin_lock_irq(&info->tx_lock);
	netif_carrier_off(info->netdev);
	spin_unlock_irq(&info->tx_lock);
	spin_unlock_bh(&info->rx_lock);

	if (info->netdev->irq)
		unbind_from_irqhandler(info->netdev->irq, info->netdev);
	info->evtchn = info->netdev->irq = 0;

	/* End access and free the pages */
	xennet_end_access(info->tx_ring_ref, info->tx.sring);
	xennet_end_access(info->rx_ring_ref, info->rx.sring);

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->tx.sring = NULL;
	info->rx.sring = NULL;
}

/**
 * We are reconnecting to the backend, due to a suspend/resume, or a backend
 * driver restart. We tear down our netif structure and recreate it, but
 * leave the device-layer structures intact so that this is transparent to the
 * rest of the kernel.
 */
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev->dev.driver_data;

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);
	return 0;
}

static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}

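/*
 * Event-channel interrupt handler: reap completed transmit requests and,
 * if receive responses are pending, hand the rest of the work to the
 * NAPI poll routine.
 */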
static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct netfront_info *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->tx_lock, flags);

	if (likely(netif_carrier_ok(dev))) {
		xennet_tx_buf_gc(dev);
		/* Under tx_lock: protects access to rx shared-ring indexes. */
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			netif_rx_schedule(dev, &np->napi);
	}

	spin_unlock_irqrestore(&np->tx_lock, flags);

	return IRQ_HANDLED;
}

static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
{
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	int err;
	struct net_device *netdev = info->netdev;

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->rx.sring = NULL;
	info->tx.sring = NULL;
	netdev->irq = 0;

	err = xen_net_read_mac(dev, netdev->dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto fail;
	}

	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
	if (err < 0) {
		free_page((unsigned long)txs);
		goto fail;
	}

	info->tx_ring_ref = err;
	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
	if (err < 0) {
		free_page((unsigned long)rxs);
		goto fail;
	}
	info->rx_ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
					IRQF_SAMPLE_RANDOM, netdev->name,
					netdev);
	if (err < 0)
		goto fail;
	netdev->irq = err;
	return 0;

fail:
	return err;
}

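/*
 * talk_to_backend() below publishes the ring references, the event channel
 * and the frontend's feature flags (request-rx-copy, feature-rx-notify,
 * feature-sg, feature-gso-tcpv4) in a single xenstore transaction so the
 * backend can complete the connection.
 */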
/* Common code used when first setting up, and when resuming. */
static int talk_to_backend(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_netfront(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
			    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
			    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
			    1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
destroy_ring:
	xennet_disconnect_backend(info);
out:
	return err;
}

static int xennet_set_sg(struct net_device *dev, u32 data)
{
	if (data) {
		struct netfront_info *np = netdev_priv(dev);
		int val;

		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
				 "%d", &val) < 0)
			val = 0;
		if (!val)
			return -ENOSYS;
	} else if (dev->mtu > ETH_DATA_LEN)
		dev->mtu = ETH_DATA_LEN;

	return ethtool_op_set_sg(dev, data);
}

static int xennet_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		struct netfront_info *np = netdev_priv(dev);
		int val;

		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv4", "%d", &val) < 0)
			val = 0;
		if (!val)
			return -ENOSYS;
	}

	return ethtool_op_set_tso(dev, data);
}

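/*
 * Enable whatever offloads both ends support: scatter/gather and TSO are
 * turned on only if the backend advertises the corresponding feature and
 * the device already offers checksum offload.
 */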
static void xennet_set_features(struct net_device *dev)
{
	/* Turn off all GSO bits except ROBUST. */
	dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
	dev->features |= NETIF_F_GSO_ROBUST;
	xennet_set_sg(dev, 0);

	/* We need checksum offload to enable scatter/gather and TSO. */
	if (!(dev->features & NETIF_F_IP_CSUM))
		return;

	if (!xennet_set_sg(dev, 1))
		xennet_set_tso(dev, 1);
}

static int xennet_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	int i, requeue_idx, err;
	struct sk_buff *skb;
	grant_ref_t ref;
	struct xen_netif_rx_request *req;
	unsigned int feature_rx_copy;

	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
			   "feature-rx-copy", "%u", &feature_rx_copy);
	if (err != 1)
		feature_rx_copy = 0;

	if (!feature_rx_copy) {
		dev_info(&dev->dev,
			 "backend does not support copying receive path\n");
		return -ENODEV;
	}

	err = talk_to_backend(np->xbdev, np);
	if (err)
		return err;

	xennet_set_features(dev);

	spin_lock_bh(&np->rx_lock);
	spin_lock_irq(&np->tx_lock);

	/* Step 1: Discard all pending TX packet fragments. */
	xennet_release_tx_bufs(np);

	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
		if (!np->rx_skbs[i])
			continue;

		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
		req = RING_GET_REQUEST(&np->rx, requeue_idx);

		gnttab_grant_foreign_access_ref(
			ref, np->xbdev->otherend_id,
			pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->frags->page)),
			0);
		req->gref = ref;
		req->id = requeue_idx;

		requeue_idx++;
	}

	np->rx.req_prod_pvt = requeue_idx;

	/*
	 * Step 3: All public and private state should now be sane.  Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netif_carrier_on(np->netdev);
	notify_remote_via_irq(np->netdev->irq);
	xennet_tx_buf_gc(dev);
	xennet_alloc_rx_buffers(dev);

	spin_unlock_irq(&np->tx_lock);
	spin_unlock_bh(&np->rx_lock);

	return 0;
}

/**
 * Callback received when the backend's state changes.
 */
static void backend_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev->dev.driver_data;
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateConnected:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}

static struct ethtool_ops xennet_ethtool_ops =
{
	.set_tx_csum = ethtool_op_set_tx_csum,
	.set_sg = xennet_set_sg,
	.set_tso = xennet_set_tso,
	.get_link = ethtool_op_get_link,
};

#ifdef CONFIG_SYSFS
static ssize_t show_rxbuf_min(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_min_target);
}

static ssize_t store_rxbuf_min(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target > np->rx_max_target)
		np->rx_max_target = target;
	np->rx_min_target = target;
	if (target > np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}

static ssize_t show_rxbuf_max(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_max_target);
}

static ssize_t store_rxbuf_max(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *np = netdev_priv(netdev);
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	if (target < RX_MIN_TARGET)
		target = RX_MIN_TARGET;
	if (target > RX_MAX_TARGET)
		target = RX_MAX_TARGET;

	spin_lock_bh(&np->rx_lock);
	if (target < np->rx_min_target)
		np->rx_min_target = target;
	np->rx_max_target = target;
	if (target < np->rx_target)
		np->rx_target = target;

	xennet_alloc_rx_buffers(netdev);

	spin_unlock_bh(&np->rx_lock);
	return len;
}

static ssize_t show_rxbuf_cur(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);

	return sprintf(buf, "%u\n", info->rx_target);
}

static struct device_attribute xennet_attrs[] = {
	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
};

static int xennet_sysfs_addif(struct net_device *netdev)
{
	int i;
	int err;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
		err = device_create_file(&netdev->dev,
					 &xennet_attrs[i]);
		if (err)
			goto fail;
	}
	return 0;

fail:
	while (--i >= 0)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
	return err;
}

static void xennet_sysfs_delif(struct net_device *netdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
}

#endif /* CONFIG_SYSFS */

static struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};


static int __devexit xennet_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev->dev.driver_data;

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	unregister_netdev(info->netdev);

	xennet_disconnect_backend(info);

	del_timer_sync(&info->rx_refill_timer);

	xennet_sysfs_delif(info->netdev);

	free_netdev(info->netdev);

	return 0;
}

static struct xenbus_driver netfront_driver = {
	.name = "vif",
	.owner = THIS_MODULE,
	.ids = netfront_ids,
	.probe = netfront_probe,
	.remove = __devexit_p(xennet_remove),
	.resume = netfront_resume,
	.otherend_changed = backend_changed,
};

static int __init netif_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (xen_initial_domain())
		return 0;

	printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");

	return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);


static void __exit netif_exit(void)
{
	if (xen_initial_domain())
		return;

	xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);

MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vif");
MODULE_ALIAS("xennet");