xref: /linux/drivers/net/xen-netback/interface.c (revision 27cf5706a04e53f6844c71be1cbbf1df665f5d19)
/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_QUEUE_LENGTH 32
#define XENVIF_NAPI_WEIGHT  64

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
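/* Illustrative arithmetic (editor's sketch, assuming the common case of
 * XEN_NETIF_RX_RING_SIZE == 256 and PAGE_SIZE == 4096): half the ring,
 * 128 slots, times 4096 bytes caps each internal queue at 512 KiB.
 */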

/* This function sets SKBTX_DEV_ZEROCOPY and increases the inflight
 * counter. The counter must be increased because the core driver calls
 * into xenvif_zerocopy_callback, which in turn calls
 * xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);

	/* Wake the dealloc thread _after_ decrementing inflight_packets so
	 * that if kthread_stop() has already been called, the dealloc thread
	 * does not wait forever with nothing to wake it.
	 */
	wake_up(&queue->dealloc_wq);
}
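/* Hedged sketch of the zerocopy lifecycle (the callees are real, the
 * sequencing shown is an editor's illustration):
 *
 *	xenvif_skb_zerocopy_prepare(queue, skb);    inflight_packets++
 *	    ... skb travels through the core stack ...
 *	xenvif_zerocopy_callback()                  on skb free
 *	    -> xenvif_skb_zerocopy_complete(queue); inflight_packets--,
 *						    dealloc thread woken
 */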

int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
		!vif->disabled;
}

static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	if (RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))
		napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}

static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue; pretend there is nothing to do so that NAPI
	 * deschedules it. The interface itself will be turned off later,
	 * in thread context.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete(napi);
		xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}
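/* Editor's note on the NAPI contract: returning work_done == budget
 * (without napi_complete()) asks the core to poll again, while
 * completing with work_done < budget hands interrupt re-enabling to
 * xenvif_napi_schedule_or_enable_events() above.
 */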

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;

	xenvif_kick_thread(queue);

	return IRQ_HANDLED;
}

irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	xenvif_tx_interrupt(irq, dev_id);
	xenvif_rx_interrupt(irq, dev_id);

	return IRQ_HANDLED;
}

int xenvif_queue_stopped(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;
	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, id));
}

void xenvif_wake_queue(struct xenvif_queue *queue)
{
	struct net_device *dev = queue->vif->dev;
	unsigned int id = queue->id;
	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
}

static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
			       void *accel_priv,
			       select_queue_fallback_t fallback)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int size = vif->hash.size;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return fallback(dev, skb) % dev->real_num_tx_queues;

	xenvif_set_skb_hash(vif, skb);

	if (size == 0)
		return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

	return vif->hash.mapping[skb_get_hash_raw(skb) % size];
}
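/* Worked example (values are the editor's, not from the source): with
 * vif->hash.size == 4 and skb_get_hash_raw(skb) == 0x9ae17c32, the
 * packet lands on vif->hash.mapping[0x9ae17c32 % 4], i.e. whichever
 * queue the frontend programmed into mapping[2] over the control ring.
 */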

static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	u16 index;
	struct xenvif_rx_cb *cb;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up */
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s.\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

	cb = XENVIF_RX_CB(skb);
	cb->expires = jiffies + vif->drain_timeout;

	/* If there is no hash algorithm configured then make sure there
	 * is no hash information in the socket buffer otherwise it
	 * would be incorrectly forwarded to the frontend.
	 */
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		skb_clear_hash(skb);

	xenvif_rx_queue_tail(queue, skb);
	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
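/* Editor's note: the cb->expires stamp set above lets the guest Rx
 * machinery elsewhere in this driver drop packets that a stalled
 * frontend has left queued past vif->drain_timeout (derived from
 * rx_drain_timeout_msecs in xenvif_alloc() below).
 */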

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned long rx_bytes = 0;
	unsigned long rx_packets = 0;
	unsigned long tx_bytes = 0;
	unsigned long tx_packets = 0;
	unsigned int index;

	spin_lock(&vif->lock);
	if (vif->queues == NULL)
		goto out;

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < vif->num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

out:
	spin_unlock(&vif->lock);

	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

	return &vif->dev->stats;
}
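/* Editor's note: vif->lock is held above so the queue array cannot be
 * torn down by a concurrent disconnect while the per-queue counters
 * are being summed (an assumption from the locking pattern, not a
 * statement in this file).
 */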

static void xenvif_up(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
}

static void xenvif_down(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		napi_disable(&queue->napi);
		del_timer_sync(&queue->credit_timeout);
	}
}

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}

static netdev_features_t xenvif_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}
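/* Illustrative example of the masking above: a frontend that negotiated
 * only TCPv4 GSO (vif->gso_mask == GSO_BIT(TCPV4)) keeps NETIF_F_TSO,
 * but ~gso_mask still has the TCPV6 bit set, so NETIF_F_TSO6 is cleared.
 */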

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
	 * a guest with the same MAX_SKB_FRAGS value.
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues = vif->num_queues;
	int i;
	unsigned int queue_index;

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;
		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			void *vif_stats = &vif->queues[queue_index].stats;
			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
		}
		data[i] = accum;
	}
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link	= ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_select_queue = xenvif_select_queue,
	.ndo_start_xmit	= xenvif_start_xmit,
	.ndo_get_stats	= xenvif_get_stats,
	.ndo_open	= xenvif_open,
	.ndo_stop	= xenvif_close,
	.ndo_change_mtu	= xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
};

struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid  = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;
	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	spin_lock_init(&vif->lock);
	INIT_LIST_HEAD(&vif->fe_mcast_addr);

	dev->netdev_ops	= &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->tx_queue_len = XENVIF_QUEUE_LENGTH;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	eth_broadcast_addr(dev->dev_addr);
	dev->dev_addr[0] &= ~0x01;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}
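/* Hedged usage sketch (variable names are the editor's; the error
 * convention follows the ERR_PTR() returns above):
 *
 *	struct xenvif *vif = xenvif_alloc(&xbdev->dev, domid, handle);
 *	if (IS_ERR(vif))
 *		return PTR_ERR(vif);
 */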

int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec  = 0UL;
	init_timer(&queue->credit_timeout);
	queue->credit_timeout.function = xenvif_tx_credit_callback;
	queue->credit_window_start = get_jiffies_64();

	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so it
	 * had better be enabled. The long-term solution would be to use
	 * just a bunch of valid page descriptors, without a dependency on
	 * ballooning.
	 */
	err = gnttab_alloc_pages(MAX_PENDING_REQS,
				 queue->mmap_pages);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  .ctx = NULL,
			  .desc = i };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	return 0;
}
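/* Editor's note: after the loops above, pending_ring[] holds the
 * identity permutation 0..MAX_PENDING_REQS-1, so pending_prod -
 * pending_cons (MAX_PENDING_REQS at this point) counts the free TX
 * slots, and every grant handle starts out invalid.
 */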

void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn)
{
	struct net_device *dev = vif->dev;
	void *addr;
	struct xen_netif_ctrl_sring *shared;
	int err;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     &ring_ref, 1, &addr);
	if (err)
		goto err;

	shared = (struct xen_netif_ctrl_sring *)addr;
	BACK_RING_INIT(&vif->ctrl, shared, XEN_PAGE_SIZE);

	err = bind_interdomain_evtchn_to_irq(vif->domid, evtchn);
	if (err < 0)
		goto err_unmap;

	vif->ctrl_irq = err;

	xenvif_init_hash(vif);

	err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
				   IRQF_ONESHOT, "xen-netback-ctrl", vif);
	if (err) {
		pr_warn("Could not set up irq handler for %s\n", dev->name);
		goto err_deinit;
	}

	return 0;

err_deinit:
	xenvif_deinit_hash(vif);
	unbind_from_irqhandler(vif->ctrl_irq, vif);
	vif->ctrl_irq = 0;

err_unmap:
	xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
				vif->ctrl.sring);
	vif->ctrl.sring = NULL;

err:
	return err;
}

int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn)
{
	struct task_struct *task;
	int err = -ENOMEM;

	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
					     rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
	atomic_set(&queue->inflight_packets, 0);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
			XENVIF_NAPI_WEIGHT);

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err_unmap;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler(
			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err_tx_unbind;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	queue->stalled = true;

	task = kthread_create(xenvif_kthread_guest_rx,
			      (void *)queue, "%s-guest-rx", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->task = task;
	get_task_struct(task);

	task = kthread_create(xenvif_dealloc_kthread,
			      (void *)queue, "%s-dealloc", queue->name);
	if (IS_ERR(task)) {
		pr_warn("Could not allocate kthread for %s\n", queue->name);
		err = PTR_ERR(task);
		goto err_rx_unbind;
	}
	queue->dealloc_task = task;

	wake_up_process(queue->task);
	wake_up_process(queue->dealloc_task);

	return 0;

err_rx_unbind:
	unbind_from_irqhandler(queue->rx_irq, queue);
	queue->rx_irq = 0;
err_tx_unbind:
	unbind_from_irqhandler(queue->tx_irq, queue);
	queue->tx_irq = 0;
err_unmap:
	xenvif_unmap_frontend_data_rings(queue);
	netif_napi_del(&queue->napi);
err:
	module_put(THIS_MODULE);
	return err;
}
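/* Editor's note: which branch above is taken is negotiated over
 * xenstore; a frontend advertising feature-split-event-channels
 * supplies separate event-channel-tx/event-channel-rx ports instead of
 * a single event-channel (see Xen's public netif.h for the protocol).
 */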

void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}

void xenvif_disconnect_data(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		netif_napi_del(&queue->napi);

		if (queue->task) {
			kthread_stop(queue->task);
			put_task_struct(queue->task);
			queue->task = NULL;
		}

		if (queue->dealloc_task) {
			kthread_stop(queue->dealloc_task);
			queue->dealloc_task = NULL;
		}

		if (queue->tx_irq) {
			if (queue->tx_irq == queue->rx_irq)
				unbind_from_irqhandler(queue->tx_irq, queue);
			else {
				unbind_from_irqhandler(queue->tx_irq, queue);
				unbind_from_irqhandler(queue->rx_irq, queue);
			}
			queue->tx_irq = 0;
		}

		xenvif_unmap_frontend_data_rings(queue);
	}

	xenvif_mcast_addr_list_free(vif);
}

void xenvif_disconnect_ctrl(struct xenvif *vif)
{
	if (vif->ctrl_irq) {
		xenvif_deinit_hash(vif);
		unbind_from_irqhandler(vif->ctrl_irq, vif);
		vif->ctrl_irq = 0;
	}

	if (vif->ctrl.sring) {
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->ctrl.sring);
		vif->ctrl.sring = NULL;
	}
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

void xenvif_free(struct xenvif *vif)
{
	struct xenvif_queue *queues = vif->queues;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	unregister_netdev(vif->dev);
	free_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index)
		xenvif_deinit_queue(&queues[queue_index]);
	vfree(queues);

	module_put(THIS_MODULE);
}
797