/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
 * increasing the inflight counter. We need to increase the inflight
 * counter because the core driver calls into xenvif_zerocopy_callback
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
                                 struct sk_buff *skb)
{
        skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
        atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
        atomic_dec(&queue->inflight_packets);

        /* Wake the dealloc thread _after_ decrementing inflight_packets so
         * that if kthread_stop() has already been called, the dealloc thread
         * does not wait forever with nothing to wake it.
         */
        wake_up(&queue->dealloc_wq);
}
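
/* Editor's note: the ordering above matters because the dealloc kthread
 * (xenvif_dealloc_kthread() in netback.c) sleeps until either there is
 * dealloc work to do or it is asked to stop with no packets left in
 * flight. A minimal sketch of that wait, assuming the helper name
 * tx_dealloc_work_todo() used elsewhere in this driver:
 *
 *      wait_event_interruptible(queue->dealloc_wq,
 *                               tx_dealloc_work_todo(queue) ||
 *                               kthread_should_stop());
 *      if (kthread_should_stop() &&
 *          !atomic_read(&queue->inflight_packets))
 *              break;
 *
 * If the wake_up() came before the atomic_dec(), a stopping thread could
 * re-check inflight_packets while it was still non-zero and then sleep
 * with no further wakeup coming.
 */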
int xenvif_schedulable(struct xenvif *vif)
{
        return netif_running(vif->dev) &&
                test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
                !vif->disabled;
}

static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;

        if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
                napi_schedule(&queue->napi);

        return IRQ_HANDLED;
}

static int xenvif_poll(struct napi_struct *napi, int budget)
{
        struct xenvif_queue *queue =
                container_of(napi, struct xenvif_queue, napi);
        int work_done;

        /* This vif is rogue, we pretend there is nothing to do
         * for this vif to deschedule it from NAPI. But this interface
         * will be turned off in thread context later.
         */
        if (unlikely(queue->vif->disabled)) {
                napi_complete(napi);
                return 0;
        }

        work_done = xenvif_tx_action(queue, budget);

        if (work_done < budget) {
                napi_complete(napi);
                xenvif_napi_schedule_or_enable_events(queue);
        }

        return work_done;
}

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
        struct xenvif_queue *queue = dev_id;

        xenvif_kick_thread(queue);

        return IRQ_HANDLED;
}

irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
        xenvif_tx_interrupt(irq, dev_id);
        xenvif_rx_interrupt(irq, dev_id);

        return IRQ_HANDLED;
}

int xenvif_queue_stopped(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;
        unsigned int id = queue->id;

        return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
        struct net_device *dev = queue->vif->dev;
        unsigned int id = queue->id;

        netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
                               void *accel_priv,
                               select_queue_fallback_t fallback)
{
        struct xenvif *vif = netdev_priv(dev);
        unsigned int size = vif->hash.size;

        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
                return fallback(dev, skb) % dev->real_num_tx_queues;

        xenvif_set_skb_hash(vif, skb);

        if (size == 0)
                return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

        return vif->hash.mapping[skb_get_hash_raw(skb) % size];
}
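
/* Editor's note: a worked example of the mapping above, using hypothetical
 * numbers. Suppose the frontend configured a hash mapping table of size 4
 * with vif->hash.mapping = { 0, 1, 1, 0 }, and the hash computed for the
 * skb by xenvif_set_skb_hash() is 0x9e3779b9. Then 0x9e3779b9 % 4 == 1,
 * so the packet is steered to mapping[1] == queue 1. With no mapping
 * table (size == 0) the hash is simply reduced modulo
 * real_num_tx_queues instead.
 */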
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        u16 index;
        struct xenvif_rx_cb *cb;

        BUG_ON(skb->dev != dev);

        /* Drop the packet if queues are not set up */
        if (num_queues < 1)
                goto drop;

        /* Obtain the queue to be used to transmit this packet */
        index = skb_get_queue_mapping(skb);
        if (index >= num_queues) {
                pr_warn_ratelimited("Invalid queue %hu for packet on interface %s.\n",
                                    index, vif->dev->name);
                index %= num_queues;
        }
        queue = &vif->queues[index];

        /* Drop the packet if queue is not ready */
        if (queue->task == NULL ||
            queue->dealloc_task == NULL ||
            !xenvif_schedulable(vif))
                goto drop;

        if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
                struct ethhdr *eth = (struct ethhdr *)skb->data;

                if (!xenvif_mcast_match(vif, eth->h_dest))
                        goto drop;
        }

        cb = XENVIF_RX_CB(skb);
        cb->expires = jiffies + vif->drain_timeout;

        /* If there is no hash algorithm configured then make sure there
         * is no hash information in the socket buffer otherwise it
         * would be incorrectly forwarded to the frontend.
         */
        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
                skb_clear_hash(skb);

        xenvif_rx_queue_tail(queue, skb);
        xenvif_kick_thread(queue);

        return NETDEV_TX_OK;

 drop:
        vif->dev->stats.tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}
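
/* Editor's note: xenvif_rx_queue_tail() enqueues onto the internal guest
 * Rx queue, whose size is capped by XENVIF_RX_QUEUE_BYTES (defined at the
 * top of this file). A worked instance, assuming the common configuration
 * of 4 KiB pages where XEN_NETIF_RX_RING_SIZE is 256:
 *
 *      XENVIF_RX_QUEUE_BYTES = 256/2 * 4096 = 512 KiB
 *
 * so roughly half a ring's worth of pages may sit queued for a slow guest
 * before backpressure is applied, and the cb->expires stamp set above
 * bounds how long a packet may wait there if the frontend stops consuming.
 */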
static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned long rx_bytes = 0;
        unsigned long rx_packets = 0;
        unsigned long tx_bytes = 0;
        unsigned long tx_packets = 0;
        unsigned int index;

        if (vif->queues == NULL)
                goto out;

        /* Aggregate tx and rx stats from each queue */
        for (index = 0; index < num_queues; ++index) {
                queue = &vif->queues[index];
                rx_bytes += queue->stats.rx_bytes;
                rx_packets += queue->stats.rx_packets;
                tx_bytes += queue->stats.tx_bytes;
                tx_packets += queue->stats.tx_packets;
        }

out:
        vif->dev->stats.rx_bytes = rx_bytes;
        vif->dev->stats.rx_packets = rx_packets;
        vif->dev->stats.tx_bytes = tx_bytes;
        vif->dev->stats.tx_packets = tx_packets;

        return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                napi_enable(&queue->napi);
                enable_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        enable_irq(queue->rx_irq);
                xenvif_napi_schedule_or_enable_events(queue);
        }
}

static void xenvif_down(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];
                disable_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        disable_irq(queue->rx_irq);
                napi_disable(&queue->napi);
                del_timer_sync(&queue->credit_timeout);
        }
}

static int xenvif_open(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);

        if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
                xenvif_up(vif);
        netif_tx_start_all_queues(dev);

        return 0;
}

static int xenvif_close(struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);

        if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
                xenvif_down(vif);
        netif_tx_stop_all_queues(dev);

        return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
        struct xenvif *vif = netdev_priv(dev);
        int max = vif->can_sg ? 65535 - VLAN_ETH_HLEN : ETH_DATA_LEN;

        if (mtu > max)
                return -EINVAL;
        dev->mtu = mtu;

        return 0;
}
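
/* Editor's note: a worked instance of the bound above. VLAN_ETH_HLEN is
 * 18 (14-byte Ethernet header plus a 4-byte VLAN tag), so with
 * scatter-gather enabled the MTU may go up to 65535 - 18 = 65517 bytes;
 * without scatter-gather the vif is limited to the standard ETH_DATA_LEN
 * of 1500.
 */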
static netdev_features_t xenvif_fix_features(struct net_device *dev,
                                             netdev_features_t features)
{
        struct xenvif *vif = netdev_priv(dev);

        if (!vif->can_sg)
                features &= ~NETIF_F_SG;
        if (~(vif->gso_mask) & GSO_BIT(TCPV4))
                features &= ~NETIF_F_TSO;
        if (~(vif->gso_mask) & GSO_BIT(TCPV6))
                features &= ~NETIF_F_TSO6;
        if (!vif->ip_csum)
                features &= ~NETIF_F_IP_CSUM;
        if (!vif->ipv6_csum)
                features &= ~NETIF_F_IPV6_CSUM;

        return features;
}

static const struct xenvif_stat {
        char name[ETH_GSTRING_LEN];
        u16 offset;
} xenvif_stats[] = {
        {
                "rx_gso_checksum_fixup",
                offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
        },
        /* If (sent != success + fail), there are probably packets never
         * freed up properly!
         */
        {
                "tx_zerocopy_sent",
                offsetof(struct xenvif_stats, tx_zerocopy_sent),
        },
        {
                "tx_zerocopy_success",
                offsetof(struct xenvif_stats, tx_zerocopy_success),
        },
        {
                "tx_zerocopy_fail",
                offsetof(struct xenvif_stats, tx_zerocopy_fail)
        },
        /* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
         * a guest with the same MAX_SKB_FRAGS value.
         */
        {
                "tx_frag_overflow",
                offsetof(struct xenvif_stats, tx_frag_overflow)
        },
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
        switch (string_set) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(xenvif_stats);
        default:
                return -EINVAL;
        }
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 *data)
{
        struct xenvif *vif = netdev_priv(dev);
        unsigned int num_queues = vif->num_queues;
        int i;
        unsigned int queue_index;

        for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
                unsigned long accum = 0;

                for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                        void *vif_stats = &vif->queues[queue_index].stats;

                        accum += *(unsigned long *)(vif_stats +
                                                    xenvif_stats[i].offset);
                }
                data[i] = accum;
        }
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
                        memcpy(data + i * ETH_GSTRING_LEN,
                               xenvif_stats[i].name, ETH_GSTRING_LEN);
                break;
        }
}

static const struct ethtool_ops xenvif_ethtool_ops = {
        .get_link = ethtool_op_get_link,

        .get_sset_count = xenvif_get_sset_count,
        .get_ethtool_stats = xenvif_get_ethtool_stats,
        .get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
        .ndo_select_queue = xenvif_select_queue,
        .ndo_start_xmit = xenvif_start_xmit,
        .ndo_get_stats = xenvif_get_stats,
        .ndo_open = xenvif_open,
        .ndo_stop = xenvif_close,
        .ndo_change_mtu = xenvif_change_mtu,
        .ndo_fix_features = xenvif_fix_features,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr = eth_validate_addr,
};

struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
                            unsigned int handle)
{
        int err;
        struct net_device *dev;
        struct xenvif *vif;
        char name[IFNAMSIZ] = {};

        snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
        /* Allocate a netdev with the max. supported number of queues.
         * When the guest selects the desired number, it will be updated
         * via netif_set_real_num_*_queues().
         */
        dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
                              ether_setup, xenvif_max_queues);
        if (dev == NULL) {
                pr_warn("Could not allocate netdev for %s\n", name);
                return ERR_PTR(-ENOMEM);
        }

        SET_NETDEV_DEV(dev, parent);

        vif = netdev_priv(dev);

        vif->domid = domid;
        vif->handle = handle;
        vif->can_sg = 1;
        vif->ip_csum = 1;
        vif->dev = dev;
        vif->disabled = false;
        vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
        vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

        /* Start out with no queues. */
        vif->queues = NULL;
        vif->num_queues = 0;

        spin_lock_init(&vif->lock);
        INIT_LIST_HEAD(&vif->fe_mcast_addr);

        dev->netdev_ops = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
        dev->features = dev->hw_features | NETIF_F_RXCSUM;
        dev->ethtool_ops = &xenvif_ethtool_ops;

        dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

        /*
         * Initialise a dummy MAC address. We choose the numerically
         * largest non-broadcast address to prevent the address getting
         * stolen by an Ethernet bridge for STP purposes.
         * (FE:FF:FF:FF:FF:FF)
         */
        eth_broadcast_addr(dev->dev_addr);
        dev->dev_addr[0] &= ~0x01;

        netif_carrier_off(dev);

        err = register_netdev(dev);
        if (err) {
                netdev_warn(dev, "Could not register device: err=%d\n", err);
                free_netdev(dev);
                return ERR_PTR(err);
        }

        netdev_dbg(dev, "Successfully created xenvif\n");

        __module_get(THIS_MODULE);

        return vif;
}
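
/* Editor's note: a worked trace of the MAC setup above.
 * eth_broadcast_addr() fills dev_addr with FF:FF:FF:FF:FF:FF, and
 * clearing bit 0 of the first octet (the multicast/group bit) yields
 * FE:FF:FF:FF:FF:FF, the highest-valued unicast MAC. Since an Ethernet
 * bridge adopts the numerically smallest MAC among its ports for its
 * STP bridge ID, this address will never be chosen, as the comment
 * above intends.
 */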
int xenvif_init_queue(struct xenvif_queue *queue)
{
        int err, i;

        queue->credit_bytes = queue->remaining_credit = ~0UL;
        queue->credit_usec = 0UL;
        init_timer(&queue->credit_timeout);
        queue->credit_timeout.function = xenvif_tx_credit_callback;
        queue->credit_window_start = get_jiffies_64();

        queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

        skb_queue_head_init(&queue->rx_queue);
        skb_queue_head_init(&queue->tx_queue);

        queue->pending_cons = 0;
        queue->pending_prod = MAX_PENDING_REQS;
        for (i = 0; i < MAX_PENDING_REQS; ++i)
                queue->pending_ring[i] = i;

        spin_lock_init(&queue->callback_lock);
        spin_lock_init(&queue->response_lock);

        /* If ballooning is disabled, this will consume real memory, so you
         * had better enable it. The long-term solution would be to use just
         * a bunch of valid page descriptors, without depending on ballooning.
         */
        err = gnttab_alloc_pages(MAX_PENDING_REQS,
                                 queue->mmap_pages);
        if (err) {
                netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
                return -ENOMEM;
        }

        for (i = 0; i < MAX_PENDING_REQS; i++) {
                queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
                        { .callback = xenvif_zerocopy_callback,
                          .ctx = NULL,
                          .desc = i };
                queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
        }

        return 0;
}

void xenvif_carrier_on(struct xenvif *vif)
{
        rtnl_lock();
        if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
                dev_set_mtu(vif->dev, ETH_DATA_LEN);
        netdev_update_features(vif->dev);
        set_bit(VIF_STATUS_CONNECTED, &vif->status);
        if (netif_running(vif->dev))
                xenvif_up(vif);
        rtnl_unlock();
}
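
/* Editor's note: the credit fields initialised in xenvif_init_queue()
 * start out at ~0UL, i.e. effectively no rate limit. If the toolstack
 * configures a limit, the xenbus code overwrites them; for example
 * (hypothetical values), a credit window of 10,000,000 bytes per
 * 1,000,000 usec would set credit_bytes = 10000000 and
 * credit_usec = 1000000, capping the queue at roughly 10 MB/s, with
 * remaining_credit replenished per window and credit_timeout used to
 * restart a queue that ran out of credit.
 */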
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
                        unsigned int evtchn)
{
        struct net_device *dev = vif->dev;
        void *addr;
        struct xen_netif_ctrl_sring *shared;
        int err;

        err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
                                     &ring_ref, 1, &addr);
        if (err)
                goto err;

        shared = (struct xen_netif_ctrl_sring *)addr;
        BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);

        err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
        if (err < 0)
                goto err_unmap;

        vif->ctrl_irq = err;

        xenvif_init_hash(vif);

        err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
                                   IRQF_ONESHOT, "xen-netback-ctrl", vif);
        if (err) {
                pr_warn("Could not setup irq handler for %s\n", dev->name);
                goto err_deinit;
        }

        return 0;

err_deinit:
        xenvif_deinit_hash(vif);
        unbind_from_irqhandler(vif->ctrl_irq, vif);
        vif->ctrl_irq = 0;

err_unmap:
        xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
                                vif->ctrl.sring);
        vif->ctrl.sring = NULL;

err:
        return err;
}

int xenvif_connect_data(struct xenvif_queue *queue,
                        unsigned long tx_ring_ref,
                        unsigned long rx_ring_ref,
                        unsigned int tx_evtchn,
                        unsigned int rx_evtchn)
{
        struct task_struct *task;
        int err = -ENOMEM;

        BUG_ON(queue->tx_irq);
        BUG_ON(queue->task);
        BUG_ON(queue->dealloc_task);

        err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
                                             rx_ring_ref);
        if (err < 0)
                goto err;

        init_waitqueue_head(&queue->wq);
        init_waitqueue_head(&queue->dealloc_wq);
        atomic_set(&queue->inflight_packets, 0);

        netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
                       XENVIF_NAPI_WEIGHT);

        if (tx_evtchn == rx_evtchn) {
                /* feature-split-event-channels == 0 */
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
                        queue->name, queue);
                if (err < 0)
                        goto err_unmap;
                queue->tx_irq = queue->rx_irq = err;
                disable_irq(queue->tx_irq);
        } else {
                /* feature-split-event-channels == 1 */
                snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
                         "%s-tx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
                        queue->tx_irq_name, queue);
                if (err < 0)
                        goto err_unmap;
                queue->tx_irq = err;
                disable_irq(queue->tx_irq);

                snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
                         "%s-rx", queue->name);
                err = bind_interdomain_evtchn_to_irqhandler(
                        queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
                        queue->rx_irq_name, queue);
                if (err < 0)
                        goto err_tx_unbind;
                queue->rx_irq = err;
                disable_irq(queue->rx_irq);
        }

        queue->stalled = true;

        task = kthread_create(xenvif_kthread_guest_rx,
                              (void *)queue, "%s-guest-rx", queue->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", queue->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }
        queue->task = task;
        get_task_struct(task);

        task = kthread_create(xenvif_dealloc_kthread,
                              (void *)queue, "%s-dealloc", queue->name);
        if (IS_ERR(task)) {
                pr_warn("Could not allocate kthread for %s\n", queue->name);
                err = PTR_ERR(task);
                goto err_rx_unbind;
        }
        queue->dealloc_task = task;

        wake_up_process(queue->task);
        wake_up_process(queue->dealloc_task);

        return 0;

err_rx_unbind:
        unbind_from_irqhandler(queue->rx_irq, queue);
        queue->rx_irq = 0;
err_tx_unbind:
        unbind_from_irqhandler(queue->tx_irq, queue);
        queue->tx_irq = 0;
err_unmap:
        xenvif_unmap_frontend_data_rings(queue);
        netif_napi_del(&queue->napi);
err:
        module_put(THIS_MODULE);
        return err;
}
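
/* Editor's note on the two branches above: whether one or two event
 * channels are used is negotiated via the frontend's
 * feature-split-event-channels xenstore node. With a combined channel
 * the single handler fans out to both xenvif_tx_interrupt() and
 * xenvif_rx_interrupt(); with split channels the frontend publishes
 * separate event-channel-tx and event-channel-rx ports (instead of a
 * single event-channel), so Tx work can be handled without spuriously
 * kicking the Rx thread and vice versa.
 */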
void xenvif_carrier_off(struct xenvif *vif)
{
        struct net_device *dev = vif->dev;

        rtnl_lock();
        if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
                netif_carrier_off(dev); /* discard queued packets */
                if (netif_running(dev))
                        xenvif_down(vif);
        }
        rtnl_unlock();
}

void xenvif_disconnect_data(struct xenvif *vif)
{
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        xenvif_carrier_off(vif);

        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
                queue = &vif->queues[queue_index];

                netif_napi_del(&queue->napi);

                if (queue->task) {
                        kthread_stop(queue->task);
                        put_task_struct(queue->task);
                        queue->task = NULL;
                }

                if (queue->dealloc_task) {
                        kthread_stop(queue->dealloc_task);
                        queue->dealloc_task = NULL;
                }

                if (queue->tx_irq) {
                        if (queue->tx_irq == queue->rx_irq) {
                                unbind_from_irqhandler(queue->tx_irq, queue);
                        } else {
                                unbind_from_irqhandler(queue->tx_irq, queue);
                                unbind_from_irqhandler(queue->rx_irq, queue);
                        }
                        queue->tx_irq = 0;
                }

                xenvif_unmap_frontend_data_rings(queue);
        }

        xenvif_mcast_addr_list_free(vif);
}

void xenvif_disconnect_ctrl(struct xenvif *vif)
{
        if (vif->ctrl_irq) {
                xenvif_deinit_hash(vif);
                unbind_from_irqhandler(vif->ctrl_irq, vif);
                vif->ctrl_irq = 0;
        }

        if (vif->ctrl.sring) {
                xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
                                        vif->ctrl.sring);
                vif->ctrl.sring = NULL;
        }
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
        gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

void xenvif_free(struct xenvif *vif)
{
        struct xenvif_queue *queues = vif->queues;
        unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;

        unregister_netdev(vif->dev);
        free_netdev(vif->dev);

        for (queue_index = 0; queue_index < num_queues; ++queue_index)
                xenvif_deinit_queue(&queues[queue_index]);
        vfree(queues);

        module_put(THIS_MODULE);
}