xref: /linux/drivers/net/xen-netfront.c (revision ca55b2fef3a9373fcfc30f82fd26bc7fccbda732)
1 /*
2  * Virtual network driver for conversing with remote driver backends.
3  *
4  * Copyright (c) 2002-2005, K A Fraser
5  * Copyright (c) 2005, XenSource Ltd
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License version 2
9  * as published by the Free Software Foundation; or, when distributed
10  * separately from the Linux kernel or incorporated into other
11  * software packages, subject to the following license:
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a copy
14  * of this source file (the "Software"), to deal in the Software without
15  * restriction, including without limitation the rights to use, copy, modify,
16  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17  * and to permit persons to whom the Software is furnished to do so, subject to
18  * the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29  * IN THE SOFTWARE.
30  */
31 
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 
34 #include <linux/module.h>
35 #include <linux/kernel.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/ethtool.h>
40 #include <linux/if_ether.h>
41 #include <net/tcp.h>
42 #include <linux/udp.h>
43 #include <linux/moduleparam.h>
44 #include <linux/mm.h>
45 #include <linux/slab.h>
46 #include <net/ip.h>
47 
48 #include <xen/xen.h>
49 #include <xen/xenbus.h>
50 #include <xen/events.h>
51 #include <xen/page.h>
52 #include <xen/platform_pci.h>
53 #include <xen/grant_table.h>
54 
55 #include <xen/interface/io/netif.h>
56 #include <xen/interface/memory.h>
57 #include <xen/interface/grant_table.h>
58 
59 /* Module parameters */
60 static unsigned int xennet_max_queues;
61 module_param_named(max_queues, xennet_max_queues, uint, 0644);
62 MODULE_PARM_DESC(max_queues,
63 		 "Maximum number of queues per virtual interface");
64 
65 static const struct ethtool_ops xennet_ethtool_ops;
66 
67 struct netfront_cb {
68 	int pull_to;
69 };
70 
71 #define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
72 
73 #define RX_COPY_THRESHOLD 256
74 
75 #define GRANT_INVALID_REF	0
76 
77 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
78 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
79 
80 /* Minimum number of Rx slots (includes slot for GSO metadata). */
81 #define NET_RX_SLOTS_MIN (XEN_NETIF_NR_SLOTS_MIN + 1)
82 
83 /* Queue name is interface name with "-qNNN" appended */
84 #define QUEUE_NAME_SIZE (IFNAMSIZ + 6)
85 
86 /* IRQ name is queue name with "-tx" or "-rx" appended */
87 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
88 
89 struct netfront_stats {
90 	u64			packets;
91 	u64			bytes;
92 	struct u64_stats_sync	syncp;
93 };
94 
95 struct netfront_info;
96 
97 struct netfront_queue {
98 	unsigned int id; /* Queue ID, 0-based */
99 	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
100 	struct netfront_info *info;
101 
102 	struct napi_struct napi;
103 
104 	/* Split event channels support, tx_* == rx_* when using
105 	 * single event channel.
106 	 */
107 	unsigned int tx_evtchn, rx_evtchn;
108 	unsigned int tx_irq, rx_irq;
109 	/* Only used when split event channels support is enabled */
110 	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
111 	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
112 
113 	spinlock_t   tx_lock;
114 	struct xen_netif_tx_front_ring tx;
115 	int tx_ring_ref;
116 
117 	/*
118 	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
119 	 * are linked from tx_skb_freelist through skb_entry.link.
120 	 *
121 	 *  NB. Freelist index entries are always going to be less than
122 	 *  PAGE_OFFSET, whereas pointers to skbs will always be equal to
123 	 *  or greater than PAGE_OFFSET: we use this property to distinguish
124 	 *  them.
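	 *
	 *  For example, tx_skb_freelist == 3 with tx_skbs[3].link == 7
	 *  means slot 3 is the first free entry and slot 7 the next
	 *  free one; such small indices lie well below PAGE_OFFSET, so
	 *  they cannot be mistaken for skb pointers.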
125 	 */
126 	union skb_entry {
127 		struct sk_buff *skb;
128 		unsigned long link;
129 	} tx_skbs[NET_TX_RING_SIZE];
130 	grant_ref_t gref_tx_head;
131 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
132 	struct page *grant_tx_page[NET_TX_RING_SIZE];
133 	unsigned tx_skb_freelist;
134 
135 	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
136 	struct xen_netif_rx_front_ring rx;
137 	int rx_ring_ref;
138 
139 	struct timer_list rx_refill_timer;
140 
141 	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
142 	grant_ref_t gref_rx_head;
143 	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
144 };
145 
146 struct netfront_info {
147 	struct list_head list;
148 	struct net_device *netdev;
149 
150 	struct xenbus_device *xbdev;
151 
152 	/* Multi-queue support */
153 	struct netfront_queue *queues;
154 
155 	/* Statistics */
156 	struct netfront_stats __percpu *rx_stats;
157 	struct netfront_stats __percpu *tx_stats;
158 
159 	atomic_t rx_gso_checksum_fixup;
160 };
161 
162 struct netfront_rx_info {
163 	struct xen_netif_rx_response rx;
164 	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
165 };
166 
167 static void skb_entry_set_link(union skb_entry *list, unsigned short id)
168 {
169 	list->link = id;
170 }
171 
172 static int skb_entry_is_link(const union skb_entry *list)
173 {
174 	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
175 	return (unsigned long)list->skb < PAGE_OFFSET;
176 }
177 
178 /*
179  * Helpers for acquiring and freeing slots in tx_skbs[].
180  */
181 
182 static void add_id_to_freelist(unsigned *head, union skb_entry *list,
183 			       unsigned short id)
184 {
185 	skb_entry_set_link(&list[id], *head);
186 	*head = id;
187 }
188 
189 static unsigned short get_id_from_freelist(unsigned *head,
190 					   union skb_entry *list)
191 {
192 	unsigned int id = *head;
193 	*head = list[id].link;
194 	return id;
195 }
196 
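/* The shared rings have power-of-two sizes, so masking a free-running
 * RING_IDX with (size - 1) maps it to an array index without a modulo.
 */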
197 static int xennet_rxidx(RING_IDX idx)
198 {
199 	return idx & (NET_RX_RING_SIZE - 1);
200 }
201 
202 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue,
203 					 RING_IDX ri)
204 {
205 	int i = xennet_rxidx(ri);
206 	struct sk_buff *skb = queue->rx_skbs[i];
207 	queue->rx_skbs[i] = NULL;
208 	return skb;
209 }
210 
211 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue,
212 					    RING_IDX ri)
213 {
214 	int i = xennet_rxidx(ri);
215 	grant_ref_t ref = queue->grant_rx_ref[i];
216 	queue->grant_rx_ref[i] = GRANT_INVALID_REF;
217 	return ref;
218 }
219 
220 #ifdef CONFIG_SYSFS
221 static const struct attribute_group xennet_dev_group;
222 #endif
223 
224 static bool xennet_can_sg(struct net_device *dev)
225 {
226 	return dev->features & NETIF_F_SG;
227 }
228 
229 
230 static void rx_refill_timeout(unsigned long data)
231 {
232 	struct netfront_queue *queue = (struct netfront_queue *)data;
233 	napi_schedule(&queue->napi);
234 }
235 
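/* Report the queue as available only while more than MAX_SKB_FRAGS + 2
 * slots are free, so that a maximally fragmented skb (its frags plus
 * the linear area and an extra-info slot) can always be enqueued.
 */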
236 static int netfront_tx_slot_available(struct netfront_queue *queue)
237 {
238 	return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) <
239 		(NET_TX_RING_SIZE - MAX_SKB_FRAGS - 2);
240 }
241 
242 static void xennet_maybe_wake_tx(struct netfront_queue *queue)
243 {
244 	struct net_device *dev = queue->info->netdev;
245 	struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, queue->id);
246 
247 	if (unlikely(netif_tx_queue_stopped(dev_queue)) &&
248 	    netfront_tx_slot_available(queue) &&
249 	    likely(netif_running(dev)))
250 		netif_tx_wake_queue(netdev_get_tx_queue(dev, queue->id));
251 }
252 
253 
254 static struct sk_buff *xennet_alloc_one_rx_buffer(struct netfront_queue *queue)
255 {
256 	struct sk_buff *skb;
257 	struct page *page;
258 
259 	skb = __netdev_alloc_skb(queue->info->netdev,
260 				 RX_COPY_THRESHOLD + NET_IP_ALIGN,
261 				 GFP_ATOMIC | __GFP_NOWARN);
262 	if (unlikely(!skb))
263 		return NULL;
264 
265 	page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
266 	if (!page) {
267 		kfree_skb(skb);
268 		return NULL;
269 	}
270 	skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
271 
272 	/* Align the IP header to a 16-byte boundary */
273 	skb_reserve(skb, NET_IP_ALIGN);
274 	skb->dev = queue->info->netdev;
275 
276 	return skb;
277 }
278 
279 
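/* Refill the rx ring with freshly granted buffers up to its capacity.
 * If allocation leaves fewer than NET_RX_SLOTS_MIN requests queued,
 * retry later from a timer; otherwise push the requests and notify
 * the backend if it is waiting.
 */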
280 static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
281 {
282 	RING_IDX req_prod = queue->rx.req_prod_pvt;
283 	int notify;
284 
285 	if (unlikely(!netif_carrier_ok(queue->info->netdev)))
286 		return;
287 
288 	for (req_prod = queue->rx.req_prod_pvt;
289 	     req_prod - queue->rx.rsp_cons < NET_RX_RING_SIZE;
290 	     req_prod++) {
291 		struct sk_buff *skb;
292 		unsigned short id;
293 		grant_ref_t ref;
294 		unsigned long gfn;
295 		struct xen_netif_rx_request *req;
296 
297 		skb = xennet_alloc_one_rx_buffer(queue);
298 		if (!skb)
299 			break;
300 
301 		id = xennet_rxidx(req_prod);
302 
303 		BUG_ON(queue->rx_skbs[id]);
304 		queue->rx_skbs[id] = skb;
305 
306 		ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
307 		BUG_ON((signed short)ref < 0);
308 		queue->grant_rx_ref[id] = ref;
309 
310 		gfn = xen_page_to_gfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
311 
312 		req = RING_GET_REQUEST(&queue->rx, req_prod);
313 		gnttab_grant_foreign_access_ref(ref,
314 						queue->info->xbdev->otherend_id,
315 						gfn,
316 						0);
317 
318 		req->id = id;
319 		req->gref = ref;
320 	}
321 
322 	queue->rx.req_prod_pvt = req_prod;
323 
324 	/* Not enough requests? Try again later. */
325 	if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
326 		mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
327 		return;
328 	}
329 
330 	wmb();		/* barrier so the backend sees the requests */
331 
332 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
333 	if (notify)
334 		notify_remote_via_irq(queue->rx_irq);
335 }
336 
337 static int xennet_open(struct net_device *dev)
338 {
339 	struct netfront_info *np = netdev_priv(dev);
340 	unsigned int num_queues = dev->real_num_tx_queues;
341 	unsigned int i = 0;
342 	struct netfront_queue *queue = NULL;
343 
344 	for (i = 0; i < num_queues; ++i) {
345 		queue = &np->queues[i];
346 		napi_enable(&queue->napi);
347 
348 		spin_lock_bh(&queue->rx_lock);
349 		if (netif_carrier_ok(dev)) {
350 			xennet_alloc_rx_buffers(queue);
351 			queue->rx.sring->rsp_event = queue->rx.rsp_cons + 1;
352 			if (RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))
353 				napi_schedule(&queue->napi);
354 		}
355 		spin_unlock_bh(&queue->rx_lock);
356 	}
357 
358 	netif_tx_start_all_queues(dev);
359 
360 	return 0;
361 }
362 
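/* Reclaim tx slots that the backend has responded to: end the foreign
 * access grants, return the ids to the freelist, free the skbs and,
 * if slots became available, wake a stopped tx queue.
 */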
363 static void xennet_tx_buf_gc(struct netfront_queue *queue)
364 {
365 	RING_IDX cons, prod;
366 	unsigned short id;
367 	struct sk_buff *skb;
368 
369 	BUG_ON(!netif_carrier_ok(queue->info->netdev));
370 
371 	do {
372 		prod = queue->tx.sring->rsp_prod;
373 		rmb(); /* Ensure we see responses up to 'prod'. */
374 
375 		for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
376 			struct xen_netif_tx_response *txrsp;
377 
378 			txrsp = RING_GET_RESPONSE(&queue->tx, cons);
379 			if (txrsp->status == XEN_NETIF_RSP_NULL)
380 				continue;
381 
382 			id  = txrsp->id;
383 			skb = queue->tx_skbs[id].skb;
384 			if (unlikely(gnttab_query_foreign_access(
385 				queue->grant_tx_ref[id]) != 0)) {
386 				pr_alert("%s: warning -- grant still in use by backend domain\n",
387 					 __func__);
388 				BUG();
389 			}
390 			gnttab_end_foreign_access_ref(
391 				queue->grant_tx_ref[id], GNTMAP_readonly);
392 			gnttab_release_grant_reference(
393 				&queue->gref_tx_head, queue->grant_tx_ref[id]);
394 			queue->grant_tx_ref[id] = GRANT_INVALID_REF;
395 			queue->grant_tx_page[id] = NULL;
396 			add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, id);
397 			dev_kfree_skb_irq(skb);
398 		}
399 
400 		queue->tx.rsp_cons = prod;
401 
402 		/*
403 		 * Set a new event, then check for race with update of rsp_prod.
404 		 * Note that it is essential to schedule a callback, no matter
405 		 * how few buffers are pending. Even if there is space in the
406 		 * transmit ring, higher layers may be blocked because too much
407 		 * data is outstanding: in such cases notification from Xen is
408 		 * likely to be the only kick that we'll get.
409 		 */
410 		queue->tx.sring->rsp_event =
411 			prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1;
412 		mb();		/* update shared area */
413 	} while ((cons == prod) && (prod != queue->tx.sring->rsp_prod));
414 
415 	xennet_maybe_wake_tx(queue);
416 }
417 
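/* Fill one tx ring slot for the part of [offset, offset + len) that
 * fits in @page: claim a free id and grant reference, grant the
 * backend read-only access to the page and remember the skb so that
 * xennet_tx_buf_gc() can clean up later.
 */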
418 static struct xen_netif_tx_request *xennet_make_one_txreq(
419 	struct netfront_queue *queue, struct sk_buff *skb,
420 	struct page *page, unsigned int offset, unsigned int len)
421 {
422 	unsigned int id;
423 	struct xen_netif_tx_request *tx;
424 	grant_ref_t ref;
425 
426 	len = min_t(unsigned int, PAGE_SIZE - offset, len);
427 
428 	id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
429 	tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
430 	ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
431 	BUG_ON((signed short)ref < 0);
432 
433 	gnttab_grant_foreign_access_ref(ref,
434 					queue->info->xbdev->otherend_id,
435 					xen_page_to_gfn(page),
436 					GNTMAP_readonly);
437 
438 	queue->tx_skbs[id].skb = skb;
439 	queue->grant_tx_page[id] = page;
440 	queue->grant_tx_ref[id] = ref;
441 
442 	tx->id = id;
443 	tx->gref = ref;
444 	tx->offset = offset;
445 	tx->size = len;
446 	tx->flags = 0;
447 
448 	return tx;
449 }
450 
451 static struct xen_netif_tx_request *xennet_make_txreqs(
452 	struct netfront_queue *queue, struct xen_netif_tx_request *tx,
453 	struct sk_buff *skb, struct page *page,
454 	unsigned int offset, unsigned int len)
455 {
456 	/* Skip whole pages at the start of a (possibly compound) page */
457 	page += offset >> PAGE_SHIFT;
458 	offset &= ~PAGE_MASK;
459 
460 	while (len) {
461 		tx->flags |= XEN_NETTXF_more_data;
462 		tx = xennet_make_one_txreq(queue, skb_get(skb),
463 					   page, offset, len);
464 		page++;
465 		offset = 0;
466 		len -= tx->size;
467 	}
468 
469 	return tx;
470 }
471 
472 /*
473  * Count how many ring slots are required to send this skb. Each frag
474  * might be a compound page.
475  */
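/* For illustration (assuming 4096-byte pages): a 100-byte linear area
 * starting 50 bytes into a page needs PFN_UP(150) = 1 slot, while a
 * 5000-byte frag starting at offset 0 spans PFN_UP(5000) = 2 pages
 * and therefore 2 slots.
 */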
476 static int xennet_count_skb_slots(struct sk_buff *skb)
477 {
478 	int i, frags = skb_shinfo(skb)->nr_frags;
479 	int pages;
480 
481 	pages = PFN_UP(offset_in_page(skb->data) + skb_headlen(skb));
482 
483 	for (i = 0; i < frags; i++) {
484 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
485 		unsigned long size = skb_frag_size(frag);
486 		unsigned long offset = frag->page_offset;
487 
488 		/* Whole pages before the offset contribute no ring slots */
489 		offset &= ~PAGE_MASK;
490 
491 		pages += PFN_UP(offset + size);
492 	}
493 
494 	return pages;
495 }
496 
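/* Hash-based queue selection keeps all packets of a flow on the same
 * tx queue, preserving per-flow ordering; the single-queue case skips
 * the hash entirely.
 */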
497 static u16 xennet_select_queue(struct net_device *dev, struct sk_buff *skb,
498 			       void *accel_priv, select_queue_fallback_t fallback)
499 {
500 	unsigned int num_queues = dev->real_num_tx_queues;
501 	u32 hash;
502 	u16 queue_idx;
503 
504 	/* First, check if there is only one queue */
505 	if (num_queues == 1) {
506 		queue_idx = 0;
507 	} else {
508 		hash = skb_get_hash(skb);
509 		queue_idx = hash % num_queues;
510 	}
511 
512 	return queue_idx;
513 }
514 
515 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
516 {
517 	struct netfront_info *np = netdev_priv(dev);
518 	struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
519 	struct xen_netif_tx_request *tx, *first_tx;
520 	unsigned int i;
521 	int notify;
522 	int slots;
523 	struct page *page;
524 	unsigned int offset;
525 	unsigned int len;
526 	unsigned long flags;
527 	struct netfront_queue *queue = NULL;
528 	unsigned int num_queues = dev->real_num_tx_queues;
529 	u16 queue_index;
530 
531 	/* Drop the packet if no queues are set up */
532 	if (num_queues < 1)
533 		goto drop;
534 	/* Determine which queue to transmit this SKB on */
535 	queue_index = skb_get_queue_mapping(skb);
536 	queue = &np->queues[queue_index];
537 
538 	/* If skb->len is too big for wire format, drop skb and alert
539 	 * user about misconfiguration.
540 	 */
541 	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
542 		net_alert_ratelimited(
543 			"xennet: skb->len = %u, too big for wire format\n",
544 			skb->len);
545 		goto drop;
546 	}
547 
548 	slots = xennet_count_skb_slots(skb);
549 	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
550 		net_dbg_ratelimited("xennet: skb rides the rocket: %d slots, %d bytes\n",
551 				    slots, skb->len);
552 		if (skb_linearize(skb))
553 			goto drop;
554 	}
555 
556 	page = virt_to_page(skb->data);
557 	offset = offset_in_page(skb->data);
558 	len = skb_headlen(skb);
559 
560 	spin_lock_irqsave(&queue->tx_lock, flags);
561 
562 	if (unlikely(!netif_carrier_ok(dev) ||
563 		     (slots > 1 && !xennet_can_sg(dev)) ||
564 		     netif_needs_gso(skb, netif_skb_features(skb)))) {
565 		spin_unlock_irqrestore(&queue->tx_lock, flags);
566 		goto drop;
567 	}
568 
569 	/* First request for the linear area. */
570 	first_tx = tx = xennet_make_one_txreq(queue, skb,
571 					      page, offset, len);
572 	page++;
573 	offset = 0;
574 	len -= tx->size;
575 
576 	if (skb->ip_summed == CHECKSUM_PARTIAL)
577 		/* local packet? */
578 		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
579 	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
580 		/* remote but checksummed. */
581 		tx->flags |= XEN_NETTXF_data_validated;
582 
583 	/* Optional extra info after the first request. */
584 	if (skb_shinfo(skb)->gso_size) {
585 		struct xen_netif_extra_info *gso;
586 
587 		gso = (struct xen_netif_extra_info *)
588 			RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
589 
590 		tx->flags |= XEN_NETTXF_extra_info;
591 
592 		gso->u.gso.size = skb_shinfo(skb)->gso_size;
593 		gso->u.gso.type = (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) ?
594 			XEN_NETIF_GSO_TYPE_TCPV6 :
595 			XEN_NETIF_GSO_TYPE_TCPV4;
596 		gso->u.gso.pad = 0;
597 		gso->u.gso.features = 0;
598 
599 		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
600 		gso->flags = 0;
601 	}
602 
603 	/* Requests for the rest of the linear area. */
604 	tx = xennet_make_txreqs(queue, tx, skb, page, offset, len);
605 
606 	/* Requests for all the frags. */
607 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
608 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
609 		tx = xennet_make_txreqs(queue, tx, skb,
610 					skb_frag_page(frag), frag->page_offset,
611 					skb_frag_size(frag));
612 	}
613 
614 	/* First request has the packet length. */
615 	first_tx->size = skb->len;
616 
617 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify);
618 	if (notify)
619 		notify_remote_via_irq(queue->tx_irq);
620 
621 	u64_stats_update_begin(&tx_stats->syncp);
622 	tx_stats->bytes += skb->len;
623 	tx_stats->packets++;
624 	u64_stats_update_end(&tx_stats->syncp);
625 
626 	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
627 	xennet_tx_buf_gc(queue);
628 
629 	if (!netfront_tx_slot_available(queue))
630 		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
631 
632 	spin_unlock_irqrestore(&queue->tx_lock, flags);
633 
634 	return NETDEV_TX_OK;
635 
636  drop:
637 	dev->stats.tx_dropped++;
638 	dev_kfree_skb_any(skb);
639 	return NETDEV_TX_OK;
640 }
641 
642 static int xennet_close(struct net_device *dev)
643 {
644 	struct netfront_info *np = netdev_priv(dev);
645 	unsigned int num_queues = dev->real_num_tx_queues;
646 	unsigned int i;
647 	struct netfront_queue *queue;
648 	netif_tx_stop_all_queues(np->netdev);
649 	for (i = 0; i < num_queues; ++i) {
650 		queue = &np->queues[i];
651 		napi_disable(&queue->napi);
652 	}
653 	return 0;
654 }
655 
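/* Re-post a buffer whose response could not be consumed: put the skb
 * and its grant back on the rx ring at the current producer index so
 * the slot is not lost.
 */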
656 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
657 				grant_ref_t ref)
658 {
659 	int new = xennet_rxidx(queue->rx.req_prod_pvt);
660 
661 	BUG_ON(queue->rx_skbs[new]);
662 	queue->rx_skbs[new] = skb;
663 	queue->grant_rx_ref[new] = ref;
664 	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->id = new;
665 	RING_GET_REQUEST(&queue->rx, queue->rx.req_prod_pvt)->gref = ref;
666 	queue->rx.req_prod_pvt++;
667 }
668 
669 static int xennet_get_extras(struct netfront_queue *queue,
670 			     struct xen_netif_extra_info *extras,
671 			     RING_IDX rp)
672 
673 {
674 	struct xen_netif_extra_info *extra;
675 	struct device *dev = &queue->info->netdev->dev;
676 	RING_IDX cons = queue->rx.rsp_cons;
677 	int err = 0;
678 
679 	do {
680 		struct sk_buff *skb;
681 		grant_ref_t ref;
682 
683 		if (unlikely(cons + 1 == rp)) {
684 			if (net_ratelimit())
685 				dev_warn(dev, "Missing extra info\n");
686 			err = -EBADR;
687 			break;
688 		}
689 
690 		extra = (struct xen_netif_extra_info *)
691 			RING_GET_RESPONSE(&queue->rx, ++cons);
692 
693 		if (unlikely(!extra->type ||
694 			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
695 			if (net_ratelimit())
696 				dev_warn(dev, "Invalid extra type: %d\n",
697 					extra->type);
698 			err = -EINVAL;
699 		} else {
700 			memcpy(&extras[extra->type - 1], extra,
701 			       sizeof(*extra));
702 		}
703 
704 		skb = xennet_get_rx_skb(queue, cons);
705 		ref = xennet_get_rx_ref(queue, cons);
706 		xennet_move_rx_slot(queue, skb, ref);
707 	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
708 
709 	queue->rx.rsp_cons = cons;
710 	return err;
711 }
712 
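/* Consume the chain of responses that make up one packet.  The first
 * response has already been copied into @rinfo by the caller; any
 * extras are parsed, and the skb backing each fragment has its grant
 * ended and is collected on @list for xennet_fill_frags().
 */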
713 static int xennet_get_responses(struct netfront_queue *queue,
714 				struct netfront_rx_info *rinfo, RING_IDX rp,
715 				struct sk_buff_head *list)
716 {
717 	struct xen_netif_rx_response *rx = &rinfo->rx;
718 	struct xen_netif_extra_info *extras = rinfo->extras;
719 	struct device *dev = &queue->info->netdev->dev;
720 	RING_IDX cons = queue->rx.rsp_cons;
721 	struct sk_buff *skb = xennet_get_rx_skb(queue, cons);
722 	grant_ref_t ref = xennet_get_rx_ref(queue, cons);
723 	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
724 	int slots = 1;
725 	int err = 0;
726 	unsigned long ret;
727 
728 	if (rx->flags & XEN_NETRXF_extra_info) {
729 		err = xennet_get_extras(queue, extras, rp);
730 		cons = queue->rx.rsp_cons;
731 	}
732 
733 	for (;;) {
734 		if (unlikely(rx->status < 0 ||
735 			     rx->offset + rx->status > PAGE_SIZE)) {
736 			if (net_ratelimit())
737 				dev_warn(dev, "rx->offset: %u, size: %d\n",
738 					 rx->offset, rx->status);
739 			xennet_move_rx_slot(queue, skb, ref);
740 			err = -EINVAL;
741 			goto next;
742 		}
743 
744 		/*
745 		 * This definitely indicates a bug, either in this driver or in
746 		 * the backend driver. In future this should flag the bad
747 		 * situation to the system controller to reboot the backend.
748 		 */
749 		if (ref == GRANT_INVALID_REF) {
750 			if (net_ratelimit())
751 				dev_warn(dev, "Bad rx response id %d.\n",
752 					 rx->id);
753 			err = -EINVAL;
754 			goto next;
755 		}
756 
757 		ret = gnttab_end_foreign_access_ref(ref, 0);
758 		BUG_ON(!ret);
759 
760 		gnttab_release_grant_reference(&queue->gref_rx_head, ref);
761 
762 		__skb_queue_tail(list, skb);
763 
764 next:
765 		if (!(rx->flags & XEN_NETRXF_more_data))
766 			break;
767 
768 		if (cons + slots == rp) {
769 			if (net_ratelimit())
770 				dev_warn(dev, "Need more slots\n");
771 			err = -ENOENT;
772 			break;
773 		}
774 
775 		rx = RING_GET_RESPONSE(&queue->rx, cons + slots);
776 		skb = xennet_get_rx_skb(queue, cons + slots);
777 		ref = xennet_get_rx_ref(queue, cons + slots);
778 		slots++;
779 	}
780 
781 	if (unlikely(slots > max)) {
782 		if (net_ratelimit())
783 			dev_warn(dev, "Too many slots\n");
784 		err = -E2BIG;
785 	}
786 
787 	if (unlikely(err))
788 		queue->rx.rsp_cons = cons + slots;
789 
790 	return err;
791 }
792 
793 static int xennet_set_skb_gso(struct sk_buff *skb,
794 			      struct xen_netif_extra_info *gso)
795 {
796 	if (!gso->u.gso.size) {
797 		if (net_ratelimit())
798 			pr_warn("GSO size must not be zero\n");
799 		return -EINVAL;
800 	}
801 
802 	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4 &&
803 	    gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV6) {
804 		if (net_ratelimit())
805 			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
806 		return -EINVAL;
807 	}
808 
809 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
810 	skb_shinfo(skb)->gso_type =
811 		(gso->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4) ?
812 		SKB_GSO_TCPV4 :
813 		SKB_GSO_TCPV6;
814 
815 	/* Header must be checked, and gso_segs computed. */
816 	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
817 	skb_shinfo(skb)->gso_segs = 0;
818 
819 	return 0;
820 }
821 
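/* Attach the fragment skbs collected by xennet_get_responses() to
 * @skb as page frags, pulling data into the linear area first when
 * all MAX_SKB_FRAGS slots are already in use.
 */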
822 static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
823 				  struct sk_buff *skb,
824 				  struct sk_buff_head *list)
825 {
826 	struct skb_shared_info *shinfo = skb_shinfo(skb);
827 	RING_IDX cons = queue->rx.rsp_cons;
828 	struct sk_buff *nskb;
829 
830 	while ((nskb = __skb_dequeue(list))) {
831 		struct xen_netif_rx_response *rx =
832 			RING_GET_RESPONSE(&queue->rx, ++cons);
833 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
834 
835 		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
836 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
837 
838 			BUG_ON(pull_to <= skb_headlen(skb));
839 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
840 		}
841 		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
842 
843 		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
844 				rx->offset, rx->status, PAGE_SIZE);
845 
846 		skb_shinfo(nskb)->nr_frags = 0;
847 		kfree_skb(nskb);
848 	}
849 
850 	return cons;
851 }
852 
853 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
854 {
855 	bool recalculate_partial_csum = false;
856 
857 	/*
858 	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
859 	 * peers can fail to set NETRXF_csum_blank when sending a GSO
860 	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
861 	 * recalculate the partial checksum.
862 	 */
863 	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
864 		struct netfront_info *np = netdev_priv(dev);
865 		atomic_inc(&np->rx_gso_checksum_fixup);
866 		skb->ip_summed = CHECKSUM_PARTIAL;
867 		recalculate_partial_csum = true;
868 	}
869 
870 	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
871 	if (skb->ip_summed != CHECKSUM_PARTIAL)
872 		return 0;
873 
874 	return skb_checksum_setup(skb, recalculate_partial_csum);
875 }
876 
877 static int handle_incoming_queue(struct netfront_queue *queue,
878 				 struct sk_buff_head *rxq)
879 {
880 	struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
881 	int packets_dropped = 0;
882 	struct sk_buff *skb;
883 
884 	while ((skb = __skb_dequeue(rxq)) != NULL) {
885 		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
886 
887 		if (pull_to > skb_headlen(skb))
888 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
889 
890 		/* Ethernet work: delayed to here as it peeks at the header. */
891 		skb->protocol = eth_type_trans(skb, queue->info->netdev);
892 		skb_reset_network_header(skb);
893 
894 		if (checksum_setup(queue->info->netdev, skb)) {
895 			kfree_skb(skb);
896 			packets_dropped++;
897 			queue->info->netdev->stats.rx_errors++;
898 			continue;
899 		}
900 
901 		u64_stats_update_begin(&rx_stats->syncp);
902 		rx_stats->packets++;
903 		rx_stats->bytes += skb->len;
904 		u64_stats_update_end(&rx_stats->syncp);
905 
906 		/* Pass it up. */
907 		napi_gro_receive(&queue->napi, skb);
908 	}
909 
910 	return packets_dropped;
911 }
912 
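/* NAPI poll handler: drain up to @budget packets from the rx ring,
 * reassemble fragmented packets and set their checksum state, hand
 * them to the stack via GRO, then refill the ring with new buffers
 * and re-arm the event channel if budget remains.
 */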
913 static int xennet_poll(struct napi_struct *napi, int budget)
914 {
915 	struct netfront_queue *queue = container_of(napi, struct netfront_queue, napi);
916 	struct net_device *dev = queue->info->netdev;
917 	struct sk_buff *skb;
918 	struct netfront_rx_info rinfo;
919 	struct xen_netif_rx_response *rx = &rinfo.rx;
920 	struct xen_netif_extra_info *extras = rinfo.extras;
921 	RING_IDX i, rp;
922 	int work_done;
923 	struct sk_buff_head rxq;
924 	struct sk_buff_head errq;
925 	struct sk_buff_head tmpq;
926 	int err;
927 
928 	spin_lock(&queue->rx_lock);
929 
930 	skb_queue_head_init(&rxq);
931 	skb_queue_head_init(&errq);
932 	skb_queue_head_init(&tmpq);
933 
934 	rp = queue->rx.sring->rsp_prod;
935 	rmb(); /* Ensure we see queued responses up to 'rp'. */
936 
937 	i = queue->rx.rsp_cons;
938 	work_done = 0;
939 	while ((i != rp) && (work_done < budget)) {
940 		memcpy(rx, RING_GET_RESPONSE(&queue->rx, i), sizeof(*rx));
941 		memset(extras, 0, sizeof(rinfo.extras));
942 
943 		err = xennet_get_responses(queue, &rinfo, rp, &tmpq);
944 
945 		if (unlikely(err)) {
946 err:
947 			while ((skb = __skb_dequeue(&tmpq)))
948 				__skb_queue_tail(&errq, skb);
949 			dev->stats.rx_errors++;
950 			i = queue->rx.rsp_cons;
951 			continue;
952 		}
953 
954 		skb = __skb_dequeue(&tmpq);
955 
956 		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
957 			struct xen_netif_extra_info *gso;
958 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
959 
960 			if (unlikely(xennet_set_skb_gso(skb, gso))) {
961 				__skb_queue_head(&tmpq, skb);
962 				queue->rx.rsp_cons += skb_queue_len(&tmpq);
963 				goto err;
964 			}
965 		}
966 
967 		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
968 		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
969 			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
970 
971 		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
972 		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
973 		skb->data_len = rx->status;
974 		skb->len += rx->status;
975 
976 		i = xennet_fill_frags(queue, skb, &tmpq);
977 
978 		if (rx->flags & XEN_NETRXF_csum_blank)
979 			skb->ip_summed = CHECKSUM_PARTIAL;
980 		else if (rx->flags & XEN_NETRXF_data_validated)
981 			skb->ip_summed = CHECKSUM_UNNECESSARY;
982 
983 		__skb_queue_tail(&rxq, skb);
984 
985 		queue->rx.rsp_cons = ++i;
986 		work_done++;
987 	}
988 
989 	__skb_queue_purge(&errq);
990 
991 	work_done -= handle_incoming_queue(queue, &rxq);
992 
993 	xennet_alloc_rx_buffers(queue);
994 
995 	if (work_done < budget) {
996 		int more_to_do = 0;
997 
998 		napi_complete(napi);
999 
1000 		RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
1001 		if (more_to_do)
1002 			napi_schedule(napi);
1003 	}
1004 
1005 	spin_unlock(&queue->rx_lock);
1006 
1007 	return work_done;
1008 }
1009 
1010 static int xennet_change_mtu(struct net_device *dev, int mtu)
1011 {
1012 	int max = xennet_can_sg(dev) ? XEN_NETIF_MAX_TX_SIZE : ETH_DATA_LEN;
1013 
1014 	if (mtu > max)
1015 		return -EINVAL;
1016 	dev->mtu = mtu;
1017 	return 0;
1018 }
1019 
1020 static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1021 						    struct rtnl_link_stats64 *tot)
1022 {
1023 	struct netfront_info *np = netdev_priv(dev);
1024 	int cpu;
1025 
1026 	for_each_possible_cpu(cpu) {
1027 		struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
1028 		struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
1029 		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1030 		unsigned int start;
1031 
1032 		do {
1033 			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
1034 			tx_packets = tx_stats->packets;
1035 			tx_bytes = tx_stats->bytes;
1036 		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
1037 
1038 		do {
1039 			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
1040 			rx_packets = rx_stats->packets;
1041 			rx_bytes = rx_stats->bytes;
1042 		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
1043 
1044 		tot->rx_packets += rx_packets;
1045 		tot->tx_packets += tx_packets;
1046 		tot->rx_bytes   += rx_bytes;
1047 		tot->tx_bytes   += tx_bytes;
1048 	}
1049 
1050 	tot->rx_errors  = dev->stats.rx_errors;
1051 	tot->tx_dropped = dev->stats.tx_dropped;
1052 
1053 	return tot;
1054 }
1055 
1056 static void xennet_release_tx_bufs(struct netfront_queue *queue)
1057 {
1058 	struct sk_buff *skb;
1059 	int i;
1060 
1061 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1062 		/* Skip over entries which are actually freelist references */
1063 		if (skb_entry_is_link(&queue->tx_skbs[i]))
1064 			continue;
1065 
1066 		skb = queue->tx_skbs[i].skb;
1067 		get_page(queue->grant_tx_page[i]);
1068 		gnttab_end_foreign_access(queue->grant_tx_ref[i],
1069 					  GNTMAP_readonly,
1070 					  (unsigned long)page_address(queue->grant_tx_page[i]));
1071 		queue->grant_tx_page[i] = NULL;
1072 		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1073 		add_id_to_freelist(&queue->tx_skb_freelist, queue->tx_skbs, i);
1074 		dev_kfree_skb_irq(skb);
1075 	}
1076 }
1077 
1078 static void xennet_release_rx_bufs(struct netfront_queue *queue)
1079 {
1080 	int id, ref;
1081 
1082 	spin_lock_bh(&queue->rx_lock);
1083 
1084 	for (id = 0; id < NET_RX_RING_SIZE; id++) {
1085 		struct sk_buff *skb;
1086 		struct page *page;
1087 
1088 		skb = queue->rx_skbs[id];
1089 		if (!skb)
1090 			continue;
1091 
1092 		ref = queue->grant_rx_ref[id];
1093 		if (ref == GRANT_INVALID_REF)
1094 			continue;
1095 
1096 		page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
1097 
1098 		/* gnttab_end_foreign_access() needs a page ref until
1099 		 * foreign access is ended (which may be deferred).
1100 		 */
1101 		get_page(page);
1102 		gnttab_end_foreign_access(ref, 0,
1103 					  (unsigned long)page_address(page));
1104 		queue->grant_rx_ref[id] = GRANT_INVALID_REF;
1105 
1106 		kfree_skb(skb);
1107 	}
1108 
1109 	spin_unlock_bh(&queue->rx_lock);
1110 }
1111 
1112 static netdev_features_t xennet_fix_features(struct net_device *dev,
1113 	netdev_features_t features)
1114 {
1115 	struct netfront_info *np = netdev_priv(dev);
1116 	int val;
1117 
1118 	if (features & NETIF_F_SG) {
1119 		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
1120 				 "%d", &val) < 0)
1121 			val = 0;
1122 
1123 		if (!val)
1124 			features &= ~NETIF_F_SG;
1125 	}
1126 
1127 	if (features & NETIF_F_IPV6_CSUM) {
1128 		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1129 				 "feature-ipv6-csum-offload", "%d", &val) < 0)
1130 			val = 0;
1131 
1132 		if (!val)
1133 			features &= ~NETIF_F_IPV6_CSUM;
1134 	}
1135 
1136 	if (features & NETIF_F_TSO) {
1137 		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1138 				 "feature-gso-tcpv4", "%d", &val) < 0)
1139 			val = 0;
1140 
1141 		if (!val)
1142 			features &= ~NETIF_F_TSO;
1143 	}
1144 
1145 	if (features & NETIF_F_TSO6) {
1146 		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1147 				 "feature-gso-tcpv6", "%d", &val) < 0)
1148 			val = 0;
1149 
1150 		if (!val)
1151 			features &= ~NETIF_F_TSO6;
1152 	}
1153 
1154 	return features;
1155 }
1156 
1157 static int xennet_set_features(struct net_device *dev,
1158 	netdev_features_t features)
1159 {
1160 	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1161 		netdev_info(dev, "Reducing MTU because no SG offload\n");
1162 		dev->mtu = ETH_DATA_LEN;
1163 	}
1164 
1165 	return 0;
1166 }
1167 
1168 static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1169 {
1170 	struct netfront_queue *queue = dev_id;
1171 	unsigned long flags;
1172 
1173 	spin_lock_irqsave(&queue->tx_lock, flags);
1174 	xennet_tx_buf_gc(queue);
1175 	spin_unlock_irqrestore(&queue->tx_lock, flags);
1176 
1177 	return IRQ_HANDLED;
1178 }
1179 
1180 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1181 {
1182 	struct netfront_queue *queue = dev_id;
1183 	struct net_device *dev = queue->info->netdev;
1184 
1185 	if (likely(netif_carrier_ok(dev) &&
1186 		   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1187 		napi_schedule(&queue->napi);
1188 
1189 	return IRQ_HANDLED;
1190 }
1191 
1192 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1193 {
1194 	xennet_tx_interrupt(irq, dev_id);
1195 	xennet_rx_interrupt(irq, dev_id);
1196 	return IRQ_HANDLED;
1197 }
1198 
1199 #ifdef CONFIG_NET_POLL_CONTROLLER
1200 static void xennet_poll_controller(struct net_device *dev)
1201 {
1202 	/* Poll each queue */
1203 	struct netfront_info *info = netdev_priv(dev);
1204 	unsigned int num_queues = dev->real_num_tx_queues;
1205 	unsigned int i;
1206 	for (i = 0; i < num_queues; ++i)
1207 		xennet_interrupt(0, &info->queues[i]);
1208 }
1209 #endif
1210 
1211 static const struct net_device_ops xennet_netdev_ops = {
1212 	.ndo_open            = xennet_open,
1213 	.ndo_stop            = xennet_close,
1214 	.ndo_start_xmit      = xennet_start_xmit,
1215 	.ndo_change_mtu	     = xennet_change_mtu,
1216 	.ndo_get_stats64     = xennet_get_stats64,
1217 	.ndo_set_mac_address = eth_mac_addr,
1218 	.ndo_validate_addr   = eth_validate_addr,
1219 	.ndo_fix_features    = xennet_fix_features,
1220 	.ndo_set_features    = xennet_set_features,
1221 	.ndo_select_queue    = xennet_select_queue,
1222 #ifdef CONFIG_NET_POLL_CONTROLLER
1223 	.ndo_poll_controller = xennet_poll_controller,
1224 #endif
1225 };
1226 
1227 static void xennet_free_netdev(struct net_device *netdev)
1228 {
1229 	struct netfront_info *np = netdev_priv(netdev);
1230 
1231 	free_percpu(np->rx_stats);
1232 	free_percpu(np->tx_stats);
1233 	free_netdev(netdev);
1234 }
1235 
1236 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1237 {
1238 	int err;
1239 	struct net_device *netdev;
1240 	struct netfront_info *np;
1241 
1242 	netdev = alloc_etherdev_mq(sizeof(struct netfront_info), xennet_max_queues);
1243 	if (!netdev)
1244 		return ERR_PTR(-ENOMEM);
1245 
1246 	np                   = netdev_priv(netdev);
1247 	np->xbdev            = dev;
1248 
1249 	np->queues = NULL;
1250 
1251 	err = -ENOMEM;
1252 	np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1253 	if (np->rx_stats == NULL)
1254 		goto exit;
1255 	np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
1256 	if (np->tx_stats == NULL)
1257 		goto exit;
1258 
1259 	netdev->netdev_ops	= &xennet_netdev_ops;
1260 
1261 	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1262 				  NETIF_F_GSO_ROBUST;
1263 	netdev->hw_features	= NETIF_F_SG |
1264 				  NETIF_F_IPV6_CSUM |
1265 				  NETIF_F_TSO | NETIF_F_TSO6;
1266 
1267 	/*
1268 	 * Assume that all hw features are available for now. This set
1269 	 * will be adjusted by the call to netdev_update_features() in
1270 	 * xennet_connect() which is the earliest point where we can
1271 	 * negotiate with the backend regarding supported features.
1272 	 */
1273 	netdev->features |= netdev->hw_features;
1274 
1275 	netdev->ethtool_ops = &xennet_ethtool_ops;
1276 	SET_NETDEV_DEV(netdev, &dev->dev);
1277 
1278 	np->netdev = netdev;
1279 
1280 	netif_carrier_off(netdev);
1281 
1282 	return netdev;
1283 
1284  exit:
1285 	xennet_free_netdev(netdev);
1286 	return ERR_PTR(err);
1287 }
1288 
1289 /**
1290  * Entry point to this code when a new device is created.  Allocate the basic
1291  * structures and register the network device; the rings and event channels
1292  * for talking to the backend are set up later, in talk_to_netback().
1293  */
1294 static int netfront_probe(struct xenbus_device *dev,
1295 			  const struct xenbus_device_id *id)
1296 {
1297 	int err;
1298 	struct net_device *netdev;
1299 	struct netfront_info *info;
1300 
1301 	netdev = xennet_create_dev(dev);
1302 	if (IS_ERR(netdev)) {
1303 		err = PTR_ERR(netdev);
1304 		xenbus_dev_fatal(dev, err, "creating netdev");
1305 		return err;
1306 	}
1307 
1308 	info = netdev_priv(netdev);
1309 	dev_set_drvdata(&dev->dev, info);
1310 #ifdef CONFIG_SYSFS
1311 	info->netdev->sysfs_groups[0] = &xennet_dev_group;
1312 #endif
1313 	err = register_netdev(info->netdev);
1314 	if (err) {
1315 		pr_warn("%s: register_netdev err=%d\n", __func__, err);
1316 		goto fail;
1317 	}
1318 
1319 	return 0;
1320 
1321  fail:
1322 	xennet_free_netdev(netdev);
1323 	dev_set_drvdata(&dev->dev, NULL);
1324 	return err;
1325 }
1326 
1327 static void xennet_end_access(int ref, void *page)
1328 {
1329 	/* This frees the page as a side-effect */
1330 	if (ref != GRANT_INVALID_REF)
1331 		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1332 }
1333 
1334 static void xennet_disconnect_backend(struct netfront_info *info)
1335 {
1336 	unsigned int i = 0;
1337 	unsigned int num_queues = info->netdev->real_num_tx_queues;
1338 
1339 	netif_carrier_off(info->netdev);
1340 
1341 	for (i = 0; i < num_queues && info->queues; ++i) {
1342 		struct netfront_queue *queue = &info->queues[i];
1343 
1344 		if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1345 			unbind_from_irqhandler(queue->tx_irq, queue);
1346 		if (queue->tx_irq && (queue->tx_irq != queue->rx_irq)) {
1347 			unbind_from_irqhandler(queue->tx_irq, queue);
1348 			unbind_from_irqhandler(queue->rx_irq, queue);
1349 		}
1350 		queue->tx_evtchn = queue->rx_evtchn = 0;
1351 		queue->tx_irq = queue->rx_irq = 0;
1352 
1353 		if (netif_running(info->netdev))
1354 			napi_synchronize(&queue->napi);
1355 
1356 		xennet_release_tx_bufs(queue);
1357 		xennet_release_rx_bufs(queue);
1358 		gnttab_free_grant_references(queue->gref_tx_head);
1359 		gnttab_free_grant_references(queue->gref_rx_head);
1360 
1361 		/* End access and free the pages */
1362 		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1363 		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
1364 
1365 		queue->tx_ring_ref = GRANT_INVALID_REF;
1366 		queue->rx_ring_ref = GRANT_INVALID_REF;
1367 		queue->tx.sring = NULL;
1368 		queue->rx.sring = NULL;
1369 	}
1370 }
1371 
1372 /**
1373  * We are reconnecting to the backend, due to a suspend/resume, or a backend
1374  * driver restart.  We tear down our netif structure and recreate it, but
1375  * leave the device-layer structures intact so that this is transparent to the
1376  * rest of the kernel.
1377  */
1378 static int netfront_resume(struct xenbus_device *dev)
1379 {
1380 	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1381 
1382 	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1383 
1384 	xennet_disconnect_backend(info);
1385 	return 0;
1386 }
1387 
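/* Parse the device's xenstore "mac" node (a colon-separated string
 * such as "00:16:3e:12:34:56", shown here only as an illustration)
 * into a binary Ethernet address.
 */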
1388 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1389 {
1390 	char *s, *e, *macstr;
1391 	int i;
1392 
1393 	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1394 	if (IS_ERR(macstr))
1395 		return PTR_ERR(macstr);
1396 
1397 	for (i = 0; i < ETH_ALEN; i++) {
1398 		mac[i] = simple_strtoul(s, &e, 16);
1399 		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1400 			kfree(macstr);
1401 			return -ENOENT;
1402 		}
1403 		s = e+1;
1404 	}
1405 
1406 	kfree(macstr);
1407 	return 0;
1408 }
1409 
1410 static int setup_netfront_single(struct netfront_queue *queue)
1411 {
1412 	int err;
1413 
1414 	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1415 	if (err < 0)
1416 		goto fail;
1417 
1418 	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1419 					xennet_interrupt,
1420 					0, queue->info->netdev->name, queue);
1421 	if (err < 0)
1422 		goto bind_fail;
1423 	queue->rx_evtchn = queue->tx_evtchn;
1424 	queue->rx_irq = queue->tx_irq = err;
1425 
1426 	return 0;
1427 
1428 bind_fail:
1429 	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1430 	queue->tx_evtchn = 0;
1431 fail:
1432 	return err;
1433 }
1434 
1435 static int setup_netfront_split(struct netfront_queue *queue)
1436 {
1437 	int err;
1438 
1439 	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->tx_evtchn);
1440 	if (err < 0)
1441 		goto fail;
1442 	err = xenbus_alloc_evtchn(queue->info->xbdev, &queue->rx_evtchn);
1443 	if (err < 0)
1444 		goto alloc_rx_evtchn_fail;
1445 
1446 	snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1447 		 "%s-tx", queue->name);
1448 	err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1449 					xennet_tx_interrupt,
1450 					0, queue->tx_irq_name, queue);
1451 	if (err < 0)
1452 		goto bind_tx_fail;
1453 	queue->tx_irq = err;
1454 
1455 	snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1456 		 "%s-rx", queue->name);
1457 	err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
1458 					xennet_rx_interrupt,
1459 					0, queue->rx_irq_name, queue);
1460 	if (err < 0)
1461 		goto bind_rx_fail;
1462 	queue->rx_irq = err;
1463 
1464 	return 0;
1465 
1466 bind_rx_fail:
1467 	unbind_from_irqhandler(queue->tx_irq, queue);
1468 	queue->tx_irq = 0;
1469 bind_tx_fail:
1470 	xenbus_free_evtchn(queue->info->xbdev, queue->rx_evtchn);
1471 	queue->rx_evtchn = 0;
1472 alloc_rx_evtchn_fail:
1473 	xenbus_free_evtchn(queue->info->xbdev, queue->tx_evtchn);
1474 	queue->tx_evtchn = 0;
1475 fail:
1476 	return err;
1477 }
1478 
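/* Allocate the shared tx and rx ring pages for one queue, grant them
 * to the backend and bind the event channel(s), preferring split
 * tx/rx channels when the backend advertises support.
 */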
1479 static int setup_netfront(struct xenbus_device *dev,
1480 			struct netfront_queue *queue, unsigned int feature_split_evtchn)
1481 {
1482 	struct xen_netif_tx_sring *txs;
1483 	struct xen_netif_rx_sring *rxs;
1484 	grant_ref_t gref;
1485 	int err;
1486 
1487 	queue->tx_ring_ref = GRANT_INVALID_REF;
1488 	queue->rx_ring_ref = GRANT_INVALID_REF;
1489 	queue->rx.sring = NULL;
1490 	queue->tx.sring = NULL;
1491 
1492 	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1493 	if (!txs) {
1494 		err = -ENOMEM;
1495 		xenbus_dev_fatal(dev, err, "allocating tx ring page");
1496 		goto fail;
1497 	}
1498 	SHARED_RING_INIT(txs);
1499 	FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE);
1500 
1501 	err = xenbus_grant_ring(dev, txs, 1, &gref);
1502 	if (err < 0)
1503 		goto grant_tx_ring_fail;
1504 	queue->tx_ring_ref = gref;
1505 
1506 	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1507 	if (!rxs) {
1508 		err = -ENOMEM;
1509 		xenbus_dev_fatal(dev, err, "allocating rx ring page");
1510 		goto alloc_rx_ring_fail;
1511 	}
1512 	SHARED_RING_INIT(rxs);
1513 	FRONT_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
1514 
1515 	err = xenbus_grant_ring(dev, rxs, 1, &gref);
1516 	if (err < 0)
1517 		goto grant_rx_ring_fail;
1518 	queue->rx_ring_ref = gref;
1519 
1520 	if (feature_split_evtchn)
1521 		err = setup_netfront_split(queue);
1522 	/* Set up a single event channel if
1523 	 *  a) feature-split-event-channels == 0
1524 	 *  b) feature-split-event-channels == 1 but setting up two channels failed
1525 	 */
1526 	if (!feature_split_evtchn || err)
1527 		err = setup_netfront_single(queue);
1528 
1529 	if (err)
1530 		goto alloc_evtchn_fail;
1531 
1532 	return 0;
1533 
1534 	/* If we fail to set up netfront, it is safe to just revoke access to
1535 	 * the granted pages because the backend is not accessing them yet.
1536 	 */
1537 alloc_evtchn_fail:
1538 	gnttab_end_foreign_access_ref(queue->rx_ring_ref, 0);
1539 grant_rx_ring_fail:
1540 	free_page((unsigned long)rxs);
1541 alloc_rx_ring_fail:
1542 	gnttab_end_foreign_access_ref(queue->tx_ring_ref, 0);
1543 grant_tx_ring_fail:
1544 	free_page((unsigned long)txs);
1545 fail:
1546 	return err;
1547 }
1548 
1549 /* Queue-specific initialisation
1550  * This used to be done in xennet_create_dev() but must now
1551  * be run per-queue.
1552  */
1553 static int xennet_init_queue(struct netfront_queue *queue)
1554 {
1555 	unsigned short i;
1556 	int err = 0;
1557 
1558 	spin_lock_init(&queue->tx_lock);
1559 	spin_lock_init(&queue->rx_lock);
1560 
1561 	setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
1562 		    (unsigned long)queue);
1563 
1564 	snprintf(queue->name, sizeof(queue->name), "%s-q%u",
1565 		 queue->info->netdev->name, queue->id);
1566 
1567 	/* Initialise tx_skbs as a free chain containing every entry. */
1568 	queue->tx_skb_freelist = 0;
1569 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1570 		skb_entry_set_link(&queue->tx_skbs[i], i+1);
1571 		queue->grant_tx_ref[i] = GRANT_INVALID_REF;
1572 		queue->grant_tx_page[i] = NULL;
1573 	}
1574 
1575 	/* Clear out rx_skbs */
1576 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
1577 		queue->rx_skbs[i] = NULL;
1578 		queue->grant_rx_ref[i] = GRANT_INVALID_REF;
1579 	}
1580 
1581 	/* A grant for every tx ring slot */
1582 	if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
1583 					  &queue->gref_tx_head) < 0) {
1584 		pr_alert("can't alloc tx grant refs\n");
1585 		err = -ENOMEM;
1586 		goto exit;
1587 	}
1588 
1589 	/* A grant for every rx ring slot */
1590 	if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
1591 					  &queue->gref_rx_head) < 0) {
1592 		pr_alert("can't alloc rx grant refs\n");
1593 		err = -ENOMEM;
1594 		goto exit_free_tx;
1595 	}
1596 
1597 	return 0;
1598 
1599  exit_free_tx:
1600 	gnttab_free_grant_references(queue->gref_tx_head);
1601  exit:
1602 	return err;
1603 }
1604 
1605 static int write_queue_xenstore_keys(struct netfront_queue *queue,
1606 			   struct xenbus_transaction *xbt, int write_hierarchical)
1607 {
1608 	/* Write the queue-specific keys into XenStore in the traditional
1609 	 * way for a single queue, or under per-queue subkeys for multiple
1610 	 * queues.
1611 	 */
1612 	struct xenbus_device *dev = queue->info->xbdev;
1613 	int err;
1614 	const char *message;
1615 	char *path;
1616 	size_t pathsize;
1617 
1618 	/* Choose the correct place to write the keys */
1619 	if (write_hierarchical) {
1620 		pathsize = strlen(dev->nodename) + 10;
1621 		path = kzalloc(pathsize, GFP_KERNEL);
1622 		if (!path) {
1623 			err = -ENOMEM;
1624 			message = "out of memory while writing ring references";
1625 			goto error;
1626 		}
1627 		snprintf(path, pathsize, "%s/queue-%u",
1628 				dev->nodename, queue->id);
1629 	} else {
1630 		path = (char *)dev->nodename;
1631 	}
1632 
1633 	/* Write ring references */
1634 	err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u",
1635 			queue->tx_ring_ref);
1636 	if (err) {
1637 		message = "writing tx-ring-ref";
1638 		goto error;
1639 	}
1640 
1641 	err = xenbus_printf(*xbt, path, "rx-ring-ref", "%u",
1642 			queue->rx_ring_ref);
1643 	if (err) {
1644 		message = "writing rx-ring-ref";
1645 		goto error;
1646 	}
1647 
1648 	/* Write event channels, taking into account both shared
1649 	 * and split event channel scenarios.
1650 	 */
1651 	if (queue->tx_evtchn == queue->rx_evtchn) {
1652 		/* Shared event channel */
1653 		err = xenbus_printf(*xbt, path,
1654 				"event-channel", "%u", queue->tx_evtchn);
1655 		if (err) {
1656 			message = "writing event-channel";
1657 			goto error;
1658 		}
1659 	} else {
1660 		/* Split event channels */
1661 		err = xenbus_printf(*xbt, path,
1662 				"event-channel-tx", "%u", queue->tx_evtchn);
1663 		if (err) {
1664 			message = "writing event-channel-tx";
1665 			goto error;
1666 		}
1667 
1668 		err = xenbus_printf(*xbt, path,
1669 				"event-channel-rx", "%u", queue->rx_evtchn);
1670 		if (err) {
1671 			message = "writing event-channel-rx";
1672 			goto error;
1673 		}
1674 	}
1675 
1676 	if (write_hierarchical)
1677 		kfree(path);
1678 	return 0;
1679 
1680 error:
1681 	if (write_hierarchical)
1682 		kfree(path);
1683 	xenbus_dev_fatal(dev, err, "%s", message);
1684 	return err;
1685 }
1686 
1687 static void xennet_destroy_queues(struct netfront_info *info)
1688 {
1689 	unsigned int i;
1690 
1691 	rtnl_lock();
1692 
1693 	for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
1694 		struct netfront_queue *queue = &info->queues[i];
1695 
1696 		if (netif_running(info->netdev))
1697 			napi_disable(&queue->napi);
1698 		del_timer_sync(&queue->rx_refill_timer);
1699 		netif_napi_del(&queue->napi);
1700 	}
1701 
1702 	rtnl_unlock();
1703 
1704 	kfree(info->queues);
1705 	info->queues = NULL;
1706 }
1707 
1708 static int xennet_create_queues(struct netfront_info *info,
1709 				unsigned int *num_queues)
1710 {
1711 	unsigned int i;
1712 	int ret;
1713 
1714 	info->queues = kcalloc(*num_queues, sizeof(struct netfront_queue),
1715 			       GFP_KERNEL);
1716 	if (!info->queues)
1717 		return -ENOMEM;
1718 
1719 	rtnl_lock();
1720 
1721 	for (i = 0; i < *num_queues; i++) {
1722 		struct netfront_queue *queue = &info->queues[i];
1723 
1724 		queue->id = i;
1725 		queue->info = info;
1726 
1727 		ret = xennet_init_queue(queue);
1728 		if (ret < 0) {
1729 			dev_warn(&info->netdev->dev,
1730 				 "only created %d queues\n", i);
1731 			*num_queues = i;
1732 			break;
1733 		}
1734 
1735 		netif_napi_add(queue->info->netdev, &queue->napi,
1736 			       xennet_poll, 64);
1737 		if (netif_running(info->netdev))
1738 			napi_enable(&queue->napi);
1739 	}
1740 
1741 	netif_set_real_num_tx_queues(info->netdev, *num_queues);
1742 
1743 	rtnl_unlock();
1744 
1745 	if (*num_queues == 0) {
1746 		dev_err(&info->netdev->dev, "no queues\n");
1747 		return -EINVAL;
1748 	}
1749 	return 0;
1750 }
1751 
1752 /* Common code used when first setting up, and when resuming. */
1753 static int talk_to_netback(struct xenbus_device *dev,
1754 			   struct netfront_info *info)
1755 {
1756 	const char *message;
1757 	struct xenbus_transaction xbt;
1758 	int err;
1759 	unsigned int feature_split_evtchn;
1760 	unsigned int i = 0;
1761 	unsigned int max_queues = 0;
1762 	struct netfront_queue *queue = NULL;
1763 	unsigned int num_queues = 1;
1764 
1765 	info->netdev->irq = 0;
1766 
1767 	/* Check if backend supports multiple queues */
1768 	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1769 			   "multi-queue-max-queues", "%u", &max_queues);
1770 	if (err < 0)
1771 		max_queues = 1;
1772 	num_queues = min(max_queues, xennet_max_queues);
1773 
1774 	/* Check feature-split-event-channels */
1775 	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1776 			   "feature-split-event-channels", "%u",
1777 			   &feature_split_evtchn);
1778 	if (err < 0)
1779 		feature_split_evtchn = 0;
1780 
1781 	/* Read the MAC address. */
1782 	err = xen_net_read_mac(dev, info->netdev->dev_addr);
1783 	if (err) {
1784 		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1785 		goto out;
1786 	}
1787 
1788 	if (info->queues)
1789 		xennet_destroy_queues(info);
1790 
1791 	err = xennet_create_queues(info, &num_queues);
1792 	if (err < 0)
1793 		goto destroy_ring;
1794 
1795 	/* Create shared ring, alloc event channel -- for each queue */
1796 	for (i = 0; i < num_queues; ++i) {
1797 		queue = &info->queues[i];
1798 		err = setup_netfront(dev, queue, feature_split_evtchn);
1799 		if (err) {
1800 			/* setup_netfront() will tidy up the current
1801 			 * queue on error, but we need to clean up
1802 			 * those already allocated.
1803 			 */
1804 			if (i > 0) {
1805 				rtnl_lock();
1806 				netif_set_real_num_tx_queues(info->netdev, i);
1807 				rtnl_unlock();
1808 				goto destroy_ring;
1809 			} else {
1810 				goto out;
1811 			}
1812 		}
1813 	}
1814 
1815 again:
1816 	err = xenbus_transaction_start(&xbt);
1817 	if (err) {
1818 		xenbus_dev_fatal(dev, err, "starting transaction");
1819 		goto destroy_ring;
1820 	}
1821 
1822 	if (num_queues == 1) {
1823 		err = write_queue_xenstore_keys(&info->queues[0], &xbt, 0); /* flat */
1824 		if (err)
1825 			goto abort_transaction_no_dev_fatal;
1826 	} else {
1827 		/* Write the number of queues */
1828 		err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues",
1829 				    "%u", num_queues);
1830 		if (err) {
1831 			message = "writing multi-queue-num-queues";
1832 			goto abort_transaction_no_dev_fatal;
1833 		}
1834 
1835 		/* Write the keys for each queue */
1836 		for (i = 0; i < num_queues; ++i) {
1837 			queue = &info->queues[i];
1838 			err = write_queue_xenstore_keys(queue, &xbt, 1); /* hierarchical */
1839 			if (err)
1840 				goto abort_transaction_no_dev_fatal;
1841 		}
1842 	}

	/* The remaining keys are not queue-specific */
	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u", 1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_write(xbt, dev->nodename, "feature-gso-tcpv6", "1");
	if (err) {
		message = "writing feature-gso-tcpv6";
		goto abort_transaction;
	}

	err = xenbus_write(xbt, dev->nodename, "feature-ipv6-csum-offload",
			   "1");
	if (err) {
		message = "writing feature-ipv6-csum-offload";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

abort_transaction:
	xenbus_dev_fatal(dev, err, "%s", message);
abort_transaction_no_dev_fatal:
	xenbus_transaction_end(xbt, 1);
destroy_ring:
	xennet_disconnect_backend(info);
	kfree(info->queues);
	info->queues = NULL;
out:
	return err;
}

static int xennet_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	unsigned int num_queues = 0;
	int err;
	unsigned int feature_rx_copy;
	unsigned int j = 0;
	struct netfront_queue *queue = NULL;

	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
			   "feature-rx-copy", "%u", &feature_rx_copy);
	if (err != 1)
		feature_rx_copy = 0;

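	/*
	 * Copying is the only receive path this frontend implements, so
	 * a backend that cannot honour "request-rx-copy" is unusable.
	 */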
	if (!feature_rx_copy) {
		dev_info(&dev->dev,
			 "backend does not support copying receive path\n");
		return -ENODEV;
	}

	err = talk_to_netback(np->xbdev, np);
	if (err)
		return err;

	/* talk_to_netback() sets the correct number of queues */
	num_queues = dev->real_num_tx_queues;

	rtnl_lock();
	netdev_update_features(dev);
	rtnl_unlock();

	/*
	 * All public and private state should now be sane.  Get ready to
	 * start sending and receiving packets and give the driver domain
	 * a kick because we've probably just requeued some packets.
	 */
	netif_carrier_on(np->netdev);
	for (j = 0; j < num_queues; ++j) {
		queue = &np->queues[j];

		notify_remote_via_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			notify_remote_via_irq(queue->rx_irq);

		spin_lock_irq(&queue->tx_lock);
		xennet_tx_buf_gc(queue);
		spin_unlock_irq(&queue->tx_lock);

		spin_lock_bh(&queue->rx_lock);
		xennet_alloc_rx_buffers(queue);
		spin_unlock_bh(&queue->rx_lock);
	}

	return 0;
}

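/*
 * Sketch of the usual xenbus handshake this callback drives (states
 * and ordering per the generic xenbus protocol):
 *
 *	backend -> InitWait:	frontend runs xennet_connect() and, on
 *				success, switches itself to Connected;
 *	backend -> Connected:	notify peers (gratuitous ARP) so the
 *				network relearns our MAC address;
 *	backend -> Closing/Closed: close the frontend as well.
 */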
/**
 * netback_changed - callback received when the backend's state changes
 * @dev: the frontend's xenbus device
 * @backend_state: new XenbusState of the backend
 */
static void netback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev_get_drvdata(&dev->dev);
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateUnknown:
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		netdev_notify_peers(netdev);
		break;

	case XenbusStateClosed:
		if (dev->state == XenbusStateClosed)
			break;
		/* Missed the backend's CLOSING state -- fallthrough */
	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}

static const struct xennet_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xennet_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct netfront_info, rx_gso_checksum_fixup)
	},
};
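/*
 * Each entry pairs an ethtool statistic name with the byte offset of
 * its counter in struct netfront_info.  xennet_get_ethtool_stats()
 * below reads the counters via atomic_read(), so every field listed
 * here must be an atomic_t.
 */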

static int xennet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xennet_stats);
	default:
		return -EINVAL;
	}
}

static void xennet_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	void *np = netdev_priv(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
		data[i] = atomic_read((atomic_t *)(np + xennet_stats[i].offset));
}

static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xennet_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xennet_ethtool_ops = {
	.get_link = ethtool_op_get_link,

	.get_sset_count = xennet_get_sset_count,
	.get_ethtool_stats = xennet_get_ethtool_stats,
	.get_strings = xennet_get_strings,
};
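
/*
 * The counters above surface through the standard ethtool statistics
 * interface, e.g. (interface name is hypothetical):
 *
 *	# ethtool -S eth0
 *	NIC statistics:
 *	     rx_gso_checksum_fixup: 0
 */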

#ifdef CONFIG_SYSFS
static ssize_t show_rxbuf(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", NET_RX_RING_SIZE);
}

static ssize_t store_rxbuf(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t len)
{
	char *endp;
	unsigned long target;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	target = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EBADMSG;

	/*
	 * rxbuf_min and rxbuf_max are no longer configurable; the value
	 * is validated for backwards compatibility and then discarded.
	 */

	return len;
}

static DEVICE_ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
static DEVICE_ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf, store_rxbuf);
static DEVICE_ATTR(rxbuf_cur, S_IRUGO, show_rxbuf, NULL);

static struct attribute *xennet_dev_attrs[] = {
	&dev_attr_rxbuf_min.attr,
	&dev_attr_rxbuf_max.attr,
	&dev_attr_rxbuf_cur.attr,
	NULL
};

static const struct attribute_group xennet_dev_group = {
	.attrs = xennet_dev_attrs
};
#endif /* CONFIG_SYSFS */
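
/*
 * All three attributes report the fixed ring size; writes to rxbuf_min
 * and rxbuf_max are accepted but ignored.  Example read (path is
 * illustrative, value assumes 4 KiB pages):
 *
 *	# cat /sys/class/net/eth0/rxbuf_cur
 *	256
 */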

static int xennet_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	xennet_disconnect_backend(info);

	unregister_netdev(info->netdev);

	if (info->queues)
		xennet_destroy_queues(info);
	xennet_free_netdev(info->netdev);

	return 0;
}
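
/*
 * Note the teardown order above: the backend is disconnected first so
 * no further interrupts or NAPI polls arrive, the netdev is then
 * unregistered from the stack, and only afterwards are the queues and
 * the netdev itself freed.
 */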

static const struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};

static struct xenbus_driver netfront_driver = {
	.ids = netfront_ids,
	.probe = netfront_probe,
	.remove = xennet_remove,
	.resume = netfront_resume,
	.otherend_changed = netback_changed,
};

static int __init netif_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (!xen_has_pv_nic_devices())
		return -ENODEV;

	pr_info("Initialising Xen virtual ethernet driver\n");

	/* Allow as many queues as there are CPUs if user has not
	 * specified a value.
	 */
	if (xennet_max_queues == 0)
		xennet_max_queues = num_online_cpus();

	return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);
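
/*
 * Example (hypothetical value): cap every vif at four queue pairs by
 * setting the module parameter at load time:
 *
 *	# modprobe xen-netfront max_queues=4
 */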

static void __exit netif_exit(void)
{
	xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);

MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vif");
MODULE_ALIAS("xennet");