xref: /linux/drivers/net/xen-netfront.c (revision d458cdf712e0c671e8e819abb16ecd6e44f9daec)
1 /*
2  * Virtual network driver for conversing with remote driver backends.
3  *
4  * Copyright (c) 2002-2005, K A Fraser
5  * Copyright (c) 2005, XenSource Ltd
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License version 2
9  * as published by the Free Software Foundation; or, when distributed
10  * separately from the Linux kernel or incorporated into other
11  * software packages, subject to the following license:
12  *
13  * Permission is hereby granted, free of charge, to any person obtaining a copy
14  * of this source file (the "Software"), to deal in the Software without
15  * restriction, including without limitation the rights to use, copy, modify,
16  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
17  * and to permit persons to whom the Software is furnished to do so, subject to
18  * the following conditions:
19  *
20  * The above copyright notice and this permission notice shall be included in
21  * all copies or substantial portions of the Software.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29  * IN THE SOFTWARE.
30  */
31 
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33 
34 #include <linux/module.h>
35 #include <linux/kernel.h>
36 #include <linux/netdevice.h>
37 #include <linux/etherdevice.h>
38 #include <linux/skbuff.h>
39 #include <linux/ethtool.h>
40 #include <linux/if_ether.h>
41 #include <net/tcp.h>
42 #include <linux/udp.h>
43 #include <linux/moduleparam.h>
44 #include <linux/mm.h>
45 #include <linux/slab.h>
46 #include <net/ip.h>
47 
48 #include <asm/xen/page.h>
49 #include <xen/xen.h>
50 #include <xen/xenbus.h>
51 #include <xen/events.h>
52 #include <xen/page.h>
53 #include <xen/platform_pci.h>
54 #include <xen/grant_table.h>
55 
56 #include <xen/interface/io/netif.h>
57 #include <xen/interface/memory.h>
58 #include <xen/interface/grant_table.h>
59 
60 static const struct ethtool_ops xennet_ethtool_ops;
61 
62 struct netfront_cb {
63 	int pull_to;
64 };
65 
66 #define NETFRONT_SKB_CB(skb)	((struct netfront_cb *)((skb)->cb))
67 
68 #define RX_COPY_THRESHOLD 256
69 
70 #define GRANT_INVALID_REF	0
71 
72 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
73 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
74 #define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
75 
76 struct netfront_stats {
77 	u64			rx_packets;
78 	u64			tx_packets;
79 	u64			rx_bytes;
80 	u64			tx_bytes;
81 	struct u64_stats_sync	syncp;
82 };
83 
84 struct netfront_info {
85 	struct list_head list;
86 	struct net_device *netdev;
87 
88 	struct napi_struct napi;
89 
90 	/* Split event channels support, tx_* == rx_* when using
91 	 * single event channel.
92 	 */
93 	unsigned int tx_evtchn, rx_evtchn;
94 	unsigned int tx_irq, rx_irq;
95 	/* Only used when split event channels support is enabled */
96 	char tx_irq_name[IFNAMSIZ+4]; /* DEVNAME-tx */
97 	char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
98 
99 	struct xenbus_device *xbdev;
100 
101 	spinlock_t   tx_lock;
102 	struct xen_netif_tx_front_ring tx;
103 	int tx_ring_ref;
104 
105 	/*
106 	 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
107 	 * are linked from tx_skb_freelist through skb_entry.link.
108 	 *
109 	 *  NB. Freelist index entries are always less than PAGE_OFFSET,
110 	 *  whereas pointers to skbs are always greater than or equal to
111 	 *  PAGE_OFFSET: we use this property to distinguish
112 	 *  them.
113 	 */
114 	union skb_entry {
115 		struct sk_buff *skb;
116 		unsigned long link;
117 	} tx_skbs[NET_TX_RING_SIZE];
118 	grant_ref_t gref_tx_head;
119 	grant_ref_t grant_tx_ref[NET_TX_RING_SIZE];
120 	unsigned tx_skb_freelist;
121 
122 	spinlock_t   rx_lock ____cacheline_aligned_in_smp;
123 	struct xen_netif_rx_front_ring rx;
124 	int rx_ring_ref;
125 
126 	/* Receive-ring batched refills. */
127 #define RX_MIN_TARGET 8
128 #define RX_DFL_MIN_TARGET 64
129 #define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
130 	unsigned rx_min_target, rx_max_target, rx_target;
131 	struct sk_buff_head rx_batch;
132 
133 	struct timer_list rx_refill_timer;
134 
135 	struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
136 	grant_ref_t gref_rx_head;
137 	grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
138 
139 	unsigned long rx_pfn_array[NET_RX_RING_SIZE];
140 	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
141 	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
142 
143 	/* Statistics */
144 	struct netfront_stats __percpu *stats;
145 
146 	unsigned long rx_gso_checksum_fixup;
147 };
148 
149 struct netfront_rx_info {
150 	struct xen_netif_rx_response rx;
151 	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
152 };
153 
154 static void skb_entry_set_link(union skb_entry *list, unsigned short id)
155 {
156 	list->link = id;
157 }
158 
159 static int skb_entry_is_link(const union skb_entry *list)
160 {
161 	BUILD_BUG_ON(sizeof(list->skb) != sizeof(list->link));
162 	return (unsigned long)list->skb < PAGE_OFFSET;
163 }
164 
165 /*
166  * Access helpers for acquiring and freeing slots in tx_skbs[].
167  */
168 
169 static void add_id_to_freelist(unsigned *head, union skb_entry *list,
170 			       unsigned short id)
171 {
172 	skb_entry_set_link(&list[id], *head);
173 	*head = id;
174 }
175 
176 static unsigned short get_id_from_freelist(unsigned *head,
177 					   union skb_entry *list)
178 {
179 	unsigned int id = *head;
180 	*head = list[id].link;
181 	return id;
182 }
183 
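/*
 * Editor's sketch (not part of the original file): the freelist is a LIFO
 * threaded through tx_skbs[] itself.  Assuming the initial chain
 * 0 -> 1 -> 2 -> ... set up in xennet_create_dev(), the two helpers above
 * pair up like push/pop:
 *
 *	unsigned head = 0;
 *	unsigned short id = get_id_from_freelist(&head, np->tx_skbs);
 *	// id == 0; head now holds tx_skbs[0].link == 1
 *	add_id_to_freelist(&head, np->tx_skbs, id);
 *	// tx_skbs[0].link == 1 again and head is back to 0
 */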
184 static int xennet_rxidx(RING_IDX idx)
185 {
186 	return idx & (NET_RX_RING_SIZE - 1);
187 }
188 
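/*
 * Editorial note: NET_RX_RING_SIZE is a power of two, so the mask above is
 * a cheap modulo.  With 4 KiB pages the ring has 256 entries, e.g.
 * xennet_rxidx(258) == (258 & 255) == 2.
 */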
189 static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
190 					 RING_IDX ri)
191 {
192 	int i = xennet_rxidx(ri);
193 	struct sk_buff *skb = np->rx_skbs[i];
194 	np->rx_skbs[i] = NULL;
195 	return skb;
196 }
197 
198 static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
199 					    RING_IDX ri)
200 {
201 	int i = xennet_rxidx(ri);
202 	grant_ref_t ref = np->grant_rx_ref[i];
203 	np->grant_rx_ref[i] = GRANT_INVALID_REF;
204 	return ref;
205 }
206 
207 #ifdef CONFIG_SYSFS
208 static int xennet_sysfs_addif(struct net_device *netdev);
209 static void xennet_sysfs_delif(struct net_device *netdev);
210 #else /* !CONFIG_SYSFS */
211 #define xennet_sysfs_addif(dev) (0)
212 #define xennet_sysfs_delif(dev) do { } while (0)
213 #endif
214 
215 static bool xennet_can_sg(struct net_device *dev)
216 {
217 	return dev->features & NETIF_F_SG;
218 }
219 
220 
221 static void rx_refill_timeout(unsigned long data)
222 {
223 	struct net_device *dev = (struct net_device *)data;
224 	struct netfront_info *np = netdev_priv(dev);
225 	napi_schedule(&np->napi);
226 }
227 
228 static int netfront_tx_slot_available(struct netfront_info *np)
229 {
230 	return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
231 		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
232 }
233 
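/*
 * Editorial note: the MAX_SKB_FRAGS + 2 headroom above reserves room for
 * one worst-case packet: one slot per fragment plus the linear header and
 * a GSO extra-info request.  Assuming MAX_SKB_FRAGS == 17 and the default
 * TX_MAX_TARGET of 256, the queue is treated as full once 237 or more
 * requests are outstanding.
 */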
234 static void xennet_maybe_wake_tx(struct net_device *dev)
235 {
236 	struct netfront_info *np = netdev_priv(dev);
237 
238 	if (unlikely(netif_queue_stopped(dev)) &&
239 	    netfront_tx_slot_available(np) &&
240 	    likely(netif_running(dev)))
241 		netif_wake_queue(dev);
242 }
243 
244 static void xennet_alloc_rx_buffers(struct net_device *dev)
245 {
246 	unsigned short id;
247 	struct netfront_info *np = netdev_priv(dev);
248 	struct sk_buff *skb;
249 	struct page *page;
250 	int i, batch_target, notify;
251 	RING_IDX req_prod = np->rx.req_prod_pvt;
252 	grant_ref_t ref;
253 	unsigned long pfn;
254 	void *vaddr;
255 	struct xen_netif_rx_request *req;
256 
257 	if (unlikely(!netif_carrier_ok(dev)))
258 		return;
259 
260 	/*
261 	 * Allocate skbuffs greedily, even though we batch updates to the
262 	 * receive ring. This creates a less bursty demand on the memory
263 	 * allocator, so should reduce the chance of failed allocation requests
264 	 * both for ourself and for other kernel subsystems.
265 	 */
266 	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
267 	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
268 		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
269 					 GFP_ATOMIC | __GFP_NOWARN);
270 		if (unlikely(!skb))
271 			goto no_skb;
272 
273 	/* Align the IP header to a 16-byte boundary */
274 		skb_reserve(skb, NET_IP_ALIGN);
275 
276 		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
277 		if (!page) {
278 			kfree_skb(skb);
279 no_skb:
280 			/* Any skbuffs queued for refill? Force them out. */
281 			if (i != 0)
282 				goto refill;
283 			/* Could not allocate any skbuffs. Try again later. */
284 			mod_timer(&np->rx_refill_timer,
285 				  jiffies + (HZ/10));
286 			break;
287 		}
288 
289 		skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
290 		__skb_queue_tail(&np->rx_batch, skb);
291 	}
292 
293 	/* Is the batch large enough to be worthwhile? */
294 	if (i < (np->rx_target/2)) {
295 		if (req_prod > np->rx.sring->req_prod)
296 			goto push;
297 		return;
298 	}
299 
300 	/* Adjust our fill target if we risked running out of buffers. */
301 	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
302 	    ((np->rx_target *= 2) > np->rx_max_target))
303 		np->rx_target = np->rx_max_target;
304 
305  refill:
306 	for (i = 0; ; i++) {
307 		skb = __skb_dequeue(&np->rx_batch);
308 		if (skb == NULL)
309 			break;
310 
311 		skb->dev = dev;
312 
313 		id = xennet_rxidx(req_prod + i);
314 
315 		BUG_ON(np->rx_skbs[id]);
316 		np->rx_skbs[id] = skb;
317 
318 		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
319 		BUG_ON((signed short)ref < 0);
320 		np->grant_rx_ref[id] = ref;
321 
322 		pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
323 		vaddr = page_address(skb_frag_page(&skb_shinfo(skb)->frags[0]));
324 
325 		req = RING_GET_REQUEST(&np->rx, req_prod + i);
326 		gnttab_grant_foreign_access_ref(ref,
327 						np->xbdev->otherend_id,
328 						pfn_to_mfn(pfn),
329 						0);
330 
331 		req->id = id;
332 		req->gref = ref;
333 	}
334 
335 	wmb();		/* barrier so backend sees requests */
336 
338 	np->rx.req_prod_pvt = req_prod + i;
339  push:
340 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
341 	if (notify)
342 		notify_remote_via_irq(np->rx_irq);
343 }
344 
345 static int xennet_open(struct net_device *dev)
346 {
347 	struct netfront_info *np = netdev_priv(dev);
348 
349 	napi_enable(&np->napi);
350 
351 	spin_lock_bh(&np->rx_lock);
352 	if (netif_carrier_ok(dev)) {
353 		xennet_alloc_rx_buffers(dev);
354 		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
355 		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
356 			napi_schedule(&np->napi);
357 	}
358 	spin_unlock_bh(&np->rx_lock);
359 
360 	netif_start_queue(dev);
361 
362 	return 0;
363 }
364 
365 static void xennet_tx_buf_gc(struct net_device *dev)
366 {
367 	RING_IDX cons, prod;
368 	unsigned short id;
369 	struct netfront_info *np = netdev_priv(dev);
370 	struct sk_buff *skb;
371 
372 	BUG_ON(!netif_carrier_ok(dev));
373 
374 	do {
375 		prod = np->tx.sring->rsp_prod;
376 		rmb(); /* Ensure we see responses up to 'rp'. */
377 
378 		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
379 			struct xen_netif_tx_response *txrsp;
380 
381 			txrsp = RING_GET_RESPONSE(&np->tx, cons);
382 			if (txrsp->status == XEN_NETIF_RSP_NULL)
383 				continue;
384 
385 			id  = txrsp->id;
386 			skb = np->tx_skbs[id].skb;
387 			if (unlikely(gnttab_query_foreign_access(
388 				np->grant_tx_ref[id]) != 0)) {
389 				pr_alert("%s: warning -- grant still in use by backend domain\n",
390 					 __func__);
391 				BUG();
392 			}
393 			gnttab_end_foreign_access_ref(
394 				np->grant_tx_ref[id], GNTMAP_readonly);
395 			gnttab_release_grant_reference(
396 				&np->gref_tx_head, np->grant_tx_ref[id]);
397 			np->grant_tx_ref[id] = GRANT_INVALID_REF;
398 			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
399 			dev_kfree_skb_irq(skb);
400 		}
401 
402 		np->tx.rsp_cons = prod;
403 
404 		/*
405 		 * Set a new event, then check for race with update of tx_cons.
406 		 * Note that it is essential to schedule a callback, no matter
407 		 * how few buffers are pending. Even if there is space in the
408 		 * transmit ring, higher layers may be blocked because too much
409 		 * data is outstanding: in such cases notification from Xen is
410 		 * likely to be the only kick that we'll get.
411 		 */
412 		np->tx.sring->rsp_event =
413 			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
414 		mb();		/* update shared area */
415 	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
416 
417 	xennet_maybe_wake_tx(dev);
418 }
419 
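/*
 * Editorial example of the rsp_event heuristic above, with made-up
 * indices: if prod == 100 and req_prod == 108, the backend is asked to
 * raise the next event at 100 + (8 >> 1) + 1 == 105, i.e. once roughly
 * half of the outstanding requests have completed, instead of on every
 * single response.
 */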
420 static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
421 			      struct xen_netif_tx_request *tx)
422 {
423 	struct netfront_info *np = netdev_priv(dev);
424 	char *data = skb->data;
425 	unsigned long mfn;
426 	RING_IDX prod = np->tx.req_prod_pvt;
427 	int frags = skb_shinfo(skb)->nr_frags;
428 	unsigned int offset = offset_in_page(data);
429 	unsigned int len = skb_headlen(skb);
430 	unsigned int id;
431 	grant_ref_t ref;
432 	int i;
433 
434 	/* While the header overlaps a page boundary (including being
435 	   larger than a page), split it into page-sized chunks. */
436 	while (len > PAGE_SIZE - offset) {
437 		tx->size = PAGE_SIZE - offset;
438 		tx->flags |= XEN_NETTXF_more_data;
439 		len -= tx->size;
440 		data += tx->size;
441 		offset = 0;
442 
443 		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
444 		np->tx_skbs[id].skb = skb_get(skb);
445 		tx = RING_GET_REQUEST(&np->tx, prod++);
446 		tx->id = id;
447 		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
448 		BUG_ON((signed short)ref < 0);
449 
450 		mfn = virt_to_mfn(data);
451 		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
452 						mfn, GNTMAP_readonly);
453 
454 		tx->gref = np->grant_tx_ref[id] = ref;
455 		tx->offset = offset;
456 		tx->size = len;
457 		tx->flags = 0;
458 	}
459 
460 	/* Grant backend access to each skb fragment page. */
461 	for (i = 0; i < frags; i++) {
462 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
463 		struct page *page = skb_frag_page(frag);
464 
465 		len = skb_frag_size(frag);
466 		offset = frag->page_offset;
467 
468 		/* Data must not cross a page boundary. */
469 		BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
470 
471 		/* Skip unused pages at the start of the frag */
472 		page += offset >> PAGE_SHIFT;
473 		offset &= ~PAGE_MASK;
474 
475 		while (len > 0) {
476 			unsigned long bytes;
477 
478 			BUG_ON(offset >= PAGE_SIZE);
479 
480 			bytes = PAGE_SIZE - offset;
481 			if (bytes > len)
482 				bytes = len;
483 
484 			tx->flags |= XEN_NETTXF_more_data;
485 
486 			id = get_id_from_freelist(&np->tx_skb_freelist,
487 						  np->tx_skbs);
488 			np->tx_skbs[id].skb = skb_get(skb);
489 			tx = RING_GET_REQUEST(&np->tx, prod++);
490 			tx->id = id;
491 			ref = gnttab_claim_grant_reference(&np->gref_tx_head);
492 			BUG_ON((signed short)ref < 0);
493 
494 			mfn = pfn_to_mfn(page_to_pfn(page));
495 			gnttab_grant_foreign_access_ref(ref,
496 							np->xbdev->otherend_id,
497 							mfn, GNTMAP_readonly);
498 
499 			tx->gref = np->grant_tx_ref[id] = ref;
500 			tx->offset = offset;
501 			tx->size = bytes;
502 			tx->flags = 0;
503 
504 			offset += bytes;
505 			len -= bytes;
506 
507 			/* Advance to the next page of the compound page */
508 			if (offset == PAGE_SIZE && len) {
509 				BUG_ON(!PageCompound(page));
510 				page++;
511 				offset = 0;
512 			}
513 		}
514 	}
515 
516 	np->tx.req_prod_pvt = prod;
517 }
518 
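/*
 * Editorial sketch of the grant pattern used throughout this driver: a
 * pre-allocated pool (claim/release) is drained on the hot path instead of
 * calling the allocating gnttab API per packet.  "pool", "domid" and "mfn"
 * are placeholder names:
 *
 *	grant_ref_t ref = gnttab_claim_grant_reference(&pool);
 *	gnttab_grant_foreign_access_ref(ref, domid, mfn, GNTMAP_readonly);
 *	// ... backend maps/reads the frame ...
 *	gnttab_end_foreign_access_ref(ref, GNTMAP_readonly);
 *	gnttab_release_grant_reference(&pool, ref);
 */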
519 /*
520  * Count how many ring slots are required to send the frags of this
521  * skb. Each frag might be a compound page.
522  */
523 static int xennet_count_skb_frag_slots(struct sk_buff *skb)
524 {
525 	int i, frags = skb_shinfo(skb)->nr_frags;
526 	int pages = 0;
527 
528 	for (i = 0; i < frags; i++) {
529 		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
530 		unsigned long size = skb_frag_size(frag);
531 		unsigned long offset = frag->page_offset;
532 
533 		/* Skip unused pages at the start of the frag */
534 		offset &= ~PAGE_MASK;
535 
536 		pages += PFN_UP(offset + size);
537 	}
538 
539 	return pages;
540 }
541 
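/*
 * Worked example (editorial, assuming 4 KiB pages): a frag of 5000 bytes
 * starting at in-page offset 100 covers bytes [100, 5100) of its page run,
 * so it needs PFN_UP(5100) == 2 ring slots, one per page touched.
 */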
542 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
543 {
544 	unsigned short id;
545 	struct netfront_info *np = netdev_priv(dev);
546 	struct netfront_stats *stats = this_cpu_ptr(np->stats);
547 	struct xen_netif_tx_request *tx;
548 	char *data = skb->data;
549 	RING_IDX i;
550 	grant_ref_t ref;
551 	unsigned long mfn;
552 	int notify;
553 	int slots;
554 	unsigned int offset = offset_in_page(data);
555 	unsigned int len = skb_headlen(skb);
556 	unsigned long flags;
557 
558 	/* If skb->len is too big for wire format, drop skb and alert
559 	 * user about misconfiguration.
560 	 */
561 	if (unlikely(skb->len > XEN_NETIF_MAX_TX_SIZE)) {
562 		net_alert_ratelimited(
563 			"xennet: skb->len = %u, too big for wire format\n",
564 			skb->len);
565 		goto drop;
566 	}
567 
568 	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
569 		xennet_count_skb_frag_slots(skb);
570 	if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
571 		net_alert_ratelimited(
572 			"xennet: skb rides the rocket: %d slots\n", slots);
573 		goto drop;
574 	}
575 
576 	spin_lock_irqsave(&np->tx_lock, flags);
577 
578 	if (unlikely(!netif_carrier_ok(dev) ||
579 		     (slots > 1 && !xennet_can_sg(dev)) ||
580 		     netif_needs_gso(skb, netif_skb_features(skb)))) {
581 		spin_unlock_irqrestore(&np->tx_lock, flags);
582 		goto drop;
583 	}
584 
585 	i = np->tx.req_prod_pvt;
586 
587 	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
588 	np->tx_skbs[id].skb = skb;
589 
590 	tx = RING_GET_REQUEST(&np->tx, i);
591 
592 	tx->id   = id;
593 	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
594 	BUG_ON((signed short)ref < 0);
595 	mfn = virt_to_mfn(data);
596 	gnttab_grant_foreign_access_ref(
597 		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
598 	tx->gref = np->grant_tx_ref[id] = ref;
599 	tx->offset = offset;
600 	tx->size = len;
601 
602 	tx->flags = 0;
603 	if (skb->ip_summed == CHECKSUM_PARTIAL)
604 		/* local packet? */
605 		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
606 	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
607 		/* remote but checksummed. */
608 		tx->flags |= XEN_NETTXF_data_validated;
609 
610 	if (skb_shinfo(skb)->gso_size) {
611 		struct xen_netif_extra_info *gso;
612 
613 		gso = (struct xen_netif_extra_info *)
614 			RING_GET_REQUEST(&np->tx, ++i);
615 
616 		tx->flags |= XEN_NETTXF_extra_info;
617 
618 		gso->u.gso.size = skb_shinfo(skb)->gso_size;
619 		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
620 		gso->u.gso.pad = 0;
621 		gso->u.gso.features = 0;
622 
623 		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
624 		gso->flags = 0;
625 	}
626 
627 	np->tx.req_prod_pvt = i + 1;
628 
629 	xennet_make_frags(skb, dev, tx);
630 	tx->size = skb->len;
631 
632 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
633 	if (notify)
634 		notify_remote_via_irq(np->tx_irq);
635 
636 	u64_stats_update_begin(&stats->syncp);
637 	stats->tx_bytes += skb->len;
638 	stats->tx_packets++;
639 	u64_stats_update_end(&stats->syncp);
640 
641 	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
642 	xennet_tx_buf_gc(dev);
643 
644 	if (!netfront_tx_slot_available(np))
645 		netif_stop_queue(dev);
646 
647 	spin_unlock_irqrestore(&np->tx_lock, flags);
648 
649 	return NETDEV_TX_OK;
650 
651  drop:
652 	dev->stats.tx_dropped++;
653 	dev_kfree_skb(skb);
654 	return NETDEV_TX_OK;
655 }
656 
657 static int xennet_close(struct net_device *dev)
658 {
659 	struct netfront_info *np = netdev_priv(dev);
660 	netif_stop_queue(np->netdev);
661 	napi_disable(&np->napi);
662 	return 0;
663 }
664 
665 static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
666 				grant_ref_t ref)
667 {
668 	int new = xennet_rxidx(np->rx.req_prod_pvt);
669 
670 	BUG_ON(np->rx_skbs[new]);
671 	np->rx_skbs[new] = skb;
672 	np->grant_rx_ref[new] = ref;
673 	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
674 	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
675 	np->rx.req_prod_pvt++;
676 }
677 
678 static int xennet_get_extras(struct netfront_info *np,
679 			     struct xen_netif_extra_info *extras,
680 			     RING_IDX rp)
682 {
683 	struct xen_netif_extra_info *extra;
684 	struct device *dev = &np->netdev->dev;
685 	RING_IDX cons = np->rx.rsp_cons;
686 	int err = 0;
687 
688 	do {
689 		struct sk_buff *skb;
690 		grant_ref_t ref;
691 
692 		if (unlikely(cons + 1 == rp)) {
693 			if (net_ratelimit())
694 				dev_warn(dev, "Missing extra info\n");
695 			err = -EBADR;
696 			break;
697 		}
698 
699 		extra = (struct xen_netif_extra_info *)
700 			RING_GET_RESPONSE(&np->rx, ++cons);
701 
702 		if (unlikely(!extra->type ||
703 			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
704 			if (net_ratelimit())
705 				dev_warn(dev, "Invalid extra type: %d\n",
706 					extra->type);
707 			err = -EINVAL;
708 		} else {
709 			memcpy(&extras[extra->type - 1], extra,
710 			       sizeof(*extra));
711 		}
712 
713 		skb = xennet_get_rx_skb(np, cons);
714 		ref = xennet_get_rx_ref(np, cons);
715 		xennet_move_rx_slot(np, skb, ref);
716 	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
717 
718 	np->rx.rsp_cons = cons;
719 	return err;
720 }
721 
722 static int xennet_get_responses(struct netfront_info *np,
723 				struct netfront_rx_info *rinfo, RING_IDX rp,
724 				struct sk_buff_head *list)
725 {
726 	struct xen_netif_rx_response *rx = &rinfo->rx;
727 	struct xen_netif_extra_info *extras = rinfo->extras;
728 	struct device *dev = &np->netdev->dev;
729 	RING_IDX cons = np->rx.rsp_cons;
730 	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
731 	grant_ref_t ref = xennet_get_rx_ref(np, cons);
732 	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
733 	int slots = 1;
734 	int err = 0;
735 	unsigned long ret;
736 
737 	if (rx->flags & XEN_NETRXF_extra_info) {
738 		err = xennet_get_extras(np, extras, rp);
739 		cons = np->rx.rsp_cons;
740 	}
741 
742 	for (;;) {
743 		if (unlikely(rx->status < 0 ||
744 			     rx->offset + rx->status > PAGE_SIZE)) {
745 			if (net_ratelimit())
746 				dev_warn(dev, "rx->offset: %x, size: %u\n",
747 					 rx->offset, rx->status);
748 			xennet_move_rx_slot(np, skb, ref);
749 			err = -EINVAL;
750 			goto next;
751 		}
752 
753 		/*
754 		 * This definitely indicates a bug, either in this driver or in
755 		 * the backend driver. In future this should flag the bad
756 		 * situation to the system controller to reboot the backend.
757 		 */
758 		if (ref == GRANT_INVALID_REF) {
759 			if (net_ratelimit())
760 				dev_warn(dev, "Bad rx response id %d.\n",
761 					 rx->id);
762 			err = -EINVAL;
763 			goto next;
764 		}
765 
766 		ret = gnttab_end_foreign_access_ref(ref, 0);
767 		BUG_ON(!ret);
768 
769 		gnttab_release_grant_reference(&np->gref_rx_head, ref);
770 
771 		__skb_queue_tail(list, skb);
772 
773 next:
774 		if (!(rx->flags & XEN_NETRXF_more_data))
775 			break;
776 
777 		if (cons + slots == rp) {
778 			if (net_ratelimit())
779 				dev_warn(dev, "Need more slots\n");
780 			err = -ENOENT;
781 			break;
782 		}
783 
784 		rx = RING_GET_RESPONSE(&np->rx, cons + slots);
785 		skb = xennet_get_rx_skb(np, cons + slots);
786 		ref = xennet_get_rx_ref(np, cons + slots);
787 		slots++;
788 	}
789 
790 	if (unlikely(slots > max)) {
791 		if (net_ratelimit())
792 			dev_warn(dev, "Too many slots\n");
793 		err = -E2BIG;
794 	}
795 
796 	if (unlikely(err))
797 		np->rx.rsp_cons = cons + slots;
798 
799 	return err;
800 }
801 
802 static int xennet_set_skb_gso(struct sk_buff *skb,
803 			      struct xen_netif_extra_info *gso)
804 {
805 	if (!gso->u.gso.size) {
806 		if (net_ratelimit())
807 			pr_warn("GSO size must not be zero\n");
808 		return -EINVAL;
809 	}
810 
811 	/* Currently only TCPv4 segmentation offload is supported. */
812 	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
813 		if (net_ratelimit())
814 			pr_warn("Bad GSO type %d\n", gso->u.gso.type);
815 		return -EINVAL;
816 	}
817 
818 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
819 	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
820 
821 	/* Header must be checked, and gso_segs computed. */
822 	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
823 	skb_shinfo(skb)->gso_segs = 0;
824 
825 	return 0;
826 }
827 
828 static RING_IDX xennet_fill_frags(struct netfront_info *np,
829 				  struct sk_buff *skb,
830 				  struct sk_buff_head *list)
831 {
832 	struct skb_shared_info *shinfo = skb_shinfo(skb);
833 	RING_IDX cons = np->rx.rsp_cons;
834 	struct sk_buff *nskb;
835 
836 	while ((nskb = __skb_dequeue(list))) {
837 		struct xen_netif_rx_response *rx =
838 			RING_GET_RESPONSE(&np->rx, ++cons);
839 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
840 
841 		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
842 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
843 
844 			BUG_ON(pull_to <= skb_headlen(skb));
845 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
846 		}
847 		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
848 
849 		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
850 				rx->offset, rx->status, PAGE_SIZE);
851 
852 		skb_shinfo(nskb)->nr_frags = 0;
853 		kfree_skb(nskb);
854 	}
855 
856 	return cons;
857 }
858 
859 static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
860 {
861 	struct iphdr *iph;
862 	int err = -EPROTO;
863 	int recalculate_partial_csum = 0;
864 
865 	/*
866 	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
867 	 * peers can fail to set NETRXF_csum_blank when sending a GSO
868 	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
869 	 * recalculate the partial checksum.
870 	 */
871 	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
872 		struct netfront_info *np = netdev_priv(dev);
873 		np->rx_gso_checksum_fixup++;
874 		skb->ip_summed = CHECKSUM_PARTIAL;
875 		recalculate_partial_csum = 1;
876 	}
877 
878 	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
879 	if (skb->ip_summed != CHECKSUM_PARTIAL)
880 		return 0;
881 
882 	if (skb->protocol != htons(ETH_P_IP))
883 		goto out;
884 
885 	iph = (void *)skb->data;
886 
887 	switch (iph->protocol) {
888 	case IPPROTO_TCP:
889 		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
890 					  offsetof(struct tcphdr, check)))
891 			goto out;
892 
893 		if (recalculate_partial_csum) {
894 			struct tcphdr *tcph = tcp_hdr(skb);
895 			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
896 							 skb->len - iph->ihl*4,
897 							 IPPROTO_TCP, 0);
898 		}
899 		break;
900 	case IPPROTO_UDP:
901 		if (!skb_partial_csum_set(skb, 4 * iph->ihl,
902 					  offsetof(struct udphdr, check)))
903 			goto out;
904 
905 		if (recalculate_partial_csum) {
906 			struct udphdr *udph = udp_hdr(skb);
907 			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
908 							 skb->len - iph->ihl*4,
909 							 IPPROTO_UDP, 0);
910 		}
911 		break;
912 	default:
913 		if (net_ratelimit())
914 			pr_err("Attempting to checksum a non-TCP/UDP packet, dropping a protocol %d packet\n",
915 			       iph->protocol);
916 		goto out;
917 	}
918 
919 	err = 0;
920 
921 out:
922 	return err;
923 }
924 
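/*
 * Editorial note: csum_tcpudp_magic() above computes the IPv4
 * pseudo-header checksum (saddr, daddr, protocol and L4 length); its
 * complement is stored in the TCP/UDP check field so that the later fold
 * over the payload (which CHECKSUM_PARTIAL defers to the device or the
 * stack) produces the correct final checksum.
 */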
925 static int handle_incoming_queue(struct net_device *dev,
926 				 struct sk_buff_head *rxq)
927 {
928 	struct netfront_info *np = netdev_priv(dev);
929 	struct netfront_stats *stats = this_cpu_ptr(np->stats);
930 	int packets_dropped = 0;
931 	struct sk_buff *skb;
932 
933 	while ((skb = __skb_dequeue(rxq)) != NULL) {
934 		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
935 
936 		if (pull_to > skb_headlen(skb))
937 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
938 
939 		/* Ethernet work: Delayed to here as it peeks the header. */
940 		skb->protocol = eth_type_trans(skb, dev);
941 
942 		if (checksum_setup(dev, skb)) {
943 			kfree_skb(skb);
944 			packets_dropped++;
945 			dev->stats.rx_errors++;
946 			continue;
947 		}
948 
949 		u64_stats_update_begin(&stats->syncp);
950 		stats->rx_packets++;
951 		stats->rx_bytes += skb->len;
952 		u64_stats_update_end(&stats->syncp);
953 
954 		/* Pass it up. */
955 		napi_gro_receive(&np->napi, skb);
956 	}
957 
958 	return packets_dropped;
959 }
960 
961 static int xennet_poll(struct napi_struct *napi, int budget)
962 {
963 	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
964 	struct net_device *dev = np->netdev;
965 	struct sk_buff *skb;
966 	struct netfront_rx_info rinfo;
967 	struct xen_netif_rx_response *rx = &rinfo.rx;
968 	struct xen_netif_extra_info *extras = rinfo.extras;
969 	RING_IDX i, rp;
970 	int work_done;
971 	struct sk_buff_head rxq;
972 	struct sk_buff_head errq;
973 	struct sk_buff_head tmpq;
974 	unsigned long flags;
975 	int err;
976 
977 	spin_lock(&np->rx_lock);
978 
979 	skb_queue_head_init(&rxq);
980 	skb_queue_head_init(&errq);
981 	skb_queue_head_init(&tmpq);
982 
983 	rp = np->rx.sring->rsp_prod;
984 	rmb(); /* Ensure we see queued responses up to 'rp'. */
985 
986 	i = np->rx.rsp_cons;
987 	work_done = 0;
988 	while ((i != rp) && (work_done < budget)) {
989 		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
990 		memset(extras, 0, sizeof(rinfo.extras));
991 
992 		err = xennet_get_responses(np, &rinfo, rp, &tmpq);
993 
994 		if (unlikely(err)) {
995 err:
996 			while ((skb = __skb_dequeue(&tmpq)))
997 				__skb_queue_tail(&errq, skb);
998 			dev->stats.rx_errors++;
999 			i = np->rx.rsp_cons;
1000 			continue;
1001 		}
1002 
1003 		skb = __skb_dequeue(&tmpq);
1004 
1005 		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1006 			struct xen_netif_extra_info *gso;
1007 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1008 
1009 			if (unlikely(xennet_set_skb_gso(skb, gso))) {
1010 				__skb_queue_head(&tmpq, skb);
1011 				np->rx.rsp_cons += skb_queue_len(&tmpq);
1012 				goto err;
1013 			}
1014 		}
1015 
1016 		NETFRONT_SKB_CB(skb)->pull_to = rx->status;
1017 		if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD)
1018 			NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD;
1019 
1020 		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
1021 		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
1022 		skb->data_len = rx->status;
1023 		skb->len += rx->status;
1024 
1025 		i = xennet_fill_frags(np, skb, &tmpq);
1026 
1027 		if (rx->flags & XEN_NETRXF_csum_blank)
1028 			skb->ip_summed = CHECKSUM_PARTIAL;
1029 		else if (rx->flags & XEN_NETRXF_data_validated)
1030 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1031 
1032 		__skb_queue_tail(&rxq, skb);
1033 
1034 		np->rx.rsp_cons = ++i;
1035 		work_done++;
1036 	}
1037 
1038 	__skb_queue_purge(&errq);
1039 
1040 	work_done -= handle_incoming_queue(dev, &rxq);
1041 
1042 	/* If we get a callback with very few responses, reduce fill target. */
1043 	/* NB: exponential increase, linear decrease. */
1044 	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
1045 	     ((3*np->rx_target) / 4)) &&
1046 	    (--np->rx_target < np->rx_min_target))
1047 		np->rx_target = np->rx_min_target;
1048 
1049 	xennet_alloc_rx_buffers(dev);
1050 
1051 	if (work_done < budget) {
1052 		int more_to_do = 0;
1053 
1054 		napi_gro_flush(napi, false);
1055 
1056 		local_irq_save(flags);
1057 
1058 		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
1059 		if (!more_to_do)
1060 			__napi_complete(napi);
1061 
1062 		local_irq_restore(flags);
1063 	}
1064 
1065 	spin_unlock(&np->rx_lock);
1066 
1067 	return work_done;
1068 }
1069 
1070 static int xennet_change_mtu(struct net_device *dev, int mtu)
1071 {
1072 	int max = xennet_can_sg(dev) ?
1073 		XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER : ETH_DATA_LEN;
1074 
1075 	if (mtu > max)
1076 		return -EINVAL;
1077 	dev->mtu = mtu;
1078 	return 0;
1079 }
1080 
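/*
 * Editorial example: XEN_NETIF_MAX_TX_SIZE is 0xffff, so with SG enabled a
 * jumbo MTU such as 9000 is accepted, while without SG anything above
 * ETH_DATA_LEN (1500) is rejected with -EINVAL.
 */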
1081 static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1082 						    struct rtnl_link_stats64 *tot)
1083 {
1084 	struct netfront_info *np = netdev_priv(dev);
1085 	int cpu;
1086 
1087 	for_each_possible_cpu(cpu) {
1088 		struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
1089 		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1090 		unsigned int start;
1091 
1092 		do {
1093 			start = u64_stats_fetch_begin_bh(&stats->syncp);
1094 
1095 			rx_packets = stats->rx_packets;
1096 			tx_packets = stats->tx_packets;
1097 			rx_bytes = stats->rx_bytes;
1098 			tx_bytes = stats->tx_bytes;
1099 		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));
1100 
1101 		tot->rx_packets += rx_packets;
1102 		tot->tx_packets += tx_packets;
1103 		tot->rx_bytes   += rx_bytes;
1104 		tot->tx_bytes   += tx_bytes;
1105 	}
1106 
1107 	tot->rx_errors  = dev->stats.rx_errors;
1108 	tot->tx_dropped = dev->stats.tx_dropped;
1109 
1110 	return tot;
1111 }
1112 
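/*
 * Editorial note: the u64_stats_fetch_begin_bh()/_retry_bh() loop above is
 * a seqcount read section, so the 64-bit counters are read consistently
 * even on 32-bit SMP; the snapshot is simply retried if a writer updated
 * the counters mid-read.
 */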
1113 static void xennet_release_tx_bufs(struct netfront_info *np)
1114 {
1115 	struct sk_buff *skb;
1116 	int i;
1117 
1118 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1119 		/* Skip over entries which are actually freelist references */
1120 		if (skb_entry_is_link(&np->tx_skbs[i]))
1121 			continue;
1122 
1123 		skb = np->tx_skbs[i].skb;
1124 		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
1125 					      GNTMAP_readonly);
1126 		gnttab_release_grant_reference(&np->gref_tx_head,
1127 					       np->grant_tx_ref[i]);
1128 		np->grant_tx_ref[i] = GRANT_INVALID_REF;
1129 		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
1130 		dev_kfree_skb_irq(skb);
1131 	}
1132 }
1133 
1134 static void xennet_release_rx_bufs(struct netfront_info *np)
1135 {
1136 	struct mmu_update      *mmu = np->rx_mmu;
1137 	struct multicall_entry *mcl = np->rx_mcl;
1138 	struct sk_buff_head free_list;
1139 	struct sk_buff *skb;
1140 	unsigned long mfn;
1141 	int xfer = 0, noxfer = 0, unused = 0;
1142 	int id, ref;
1143 
1144 	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
1145 			 __func__);
1146 	return;
1147 
1148 	skb_queue_head_init(&free_list);
1149 
1150 	spin_lock_bh(&np->rx_lock);
1151 
1152 	for (id = 0; id < NET_RX_RING_SIZE; id++) {
1153 		ref = np->grant_rx_ref[id];
1154 		if (ref == GRANT_INVALID_REF) {
1155 			unused++;
1156 			continue;
1157 		}
1158 
1159 		skb = np->rx_skbs[id];
1160 		mfn = gnttab_end_foreign_transfer_ref(ref);
1161 		gnttab_release_grant_reference(&np->gref_rx_head, ref);
1162 		np->grant_rx_ref[id] = GRANT_INVALID_REF;
1163 
1164 		if (0 == mfn) {
1165 			skb_shinfo(skb)->nr_frags = 0;
1166 			dev_kfree_skb(skb);
1167 			noxfer++;
1168 			continue;
1169 		}
1170 
1171 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1172 			/* Remap the page. */
1173 			const struct page *page =
1174 				skb_frag_page(&skb_shinfo(skb)->frags[0]);
1175 			unsigned long pfn = page_to_pfn(page);
1176 			void *vaddr = page_address(page);
1177 
1178 			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
1179 						mfn_pte(mfn, PAGE_KERNEL),
1180 						0);
1181 			mcl++;
1182 			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
1183 				| MMU_MACHPHYS_UPDATE;
1184 			mmu->val = pfn;
1185 			mmu++;
1186 
1187 			set_phys_to_machine(pfn, mfn);
1188 		}
1189 		__skb_queue_tail(&free_list, skb);
1190 		xfer++;
1191 	}
1192 
1193 	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
1194 		 __func__, xfer, noxfer, unused);
1195 
1196 	if (xfer) {
1197 		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1198 			/* Do all the remapping work and M2P updates. */
1199 			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
1200 					 NULL, DOMID_SELF);
1201 			mcl++;
1202 			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
1203 		}
1204 	}
1205 
1206 	__skb_queue_purge(&free_list);
1207 
1208 	spin_unlock_bh(&np->rx_lock);
1209 }
1210 
1211 static void xennet_uninit(struct net_device *dev)
1212 {
1213 	struct netfront_info *np = netdev_priv(dev);
1214 	xennet_release_tx_bufs(np);
1215 	xennet_release_rx_bufs(np);
1216 	gnttab_free_grant_references(np->gref_tx_head);
1217 	gnttab_free_grant_references(np->gref_rx_head);
1218 }
1219 
1220 static netdev_features_t xennet_fix_features(struct net_device *dev,
1221 	netdev_features_t features)
1222 {
1223 	struct netfront_info *np = netdev_priv(dev);
1224 	int val;
1225 
1226 	if (features & NETIF_F_SG) {
1227 		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
1228 				 "%d", &val) < 0)
1229 			val = 0;
1230 
1231 		if (!val)
1232 			features &= ~NETIF_F_SG;
1233 	}
1234 
1235 	if (features & NETIF_F_TSO) {
1236 		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1237 				 "feature-gso-tcpv4", "%d", &val) < 0)
1238 			val = 0;
1239 
1240 		if (!val)
1241 			features &= ~NETIF_F_TSO;
1242 	}
1243 
1244 	return features;
1245 }
1246 
1247 static int xennet_set_features(struct net_device *dev,
1248 	netdev_features_t features)
1249 {
1250 	if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
1251 		netdev_info(dev, "Reducing MTU because no SG offload\n");
1252 		dev->mtu = ETH_DATA_LEN;
1253 	}
1254 
1255 	return 0;
1256 }
1257 
1258 static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
1259 {
1260 	struct netfront_info *np = dev_id;
1261 	struct net_device *dev = np->netdev;
1262 	unsigned long flags;
1263 
1264 	spin_lock_irqsave(&np->tx_lock, flags);
1265 	xennet_tx_buf_gc(dev);
1266 	spin_unlock_irqrestore(&np->tx_lock, flags);
1267 
1268 	return IRQ_HANDLED;
1269 }
1270 
1271 static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1272 {
1273 	struct netfront_info *np = dev_id;
1274 	struct net_device *dev = np->netdev;
1275 
1276 	if (likely(netif_carrier_ok(dev) &&
1277 		   RING_HAS_UNCONSUMED_RESPONSES(&np->rx)))
1278 			napi_schedule(&np->napi);
1279 
1280 	return IRQ_HANDLED;
1281 }
1282 
1283 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1284 {
1285 	xennet_tx_interrupt(irq, dev_id);
1286 	xennet_rx_interrupt(irq, dev_id);
1287 	return IRQ_HANDLED;
1288 }
1289 
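/*
 * Editorial note: with a single shared event channel the combined handler
 * above just runs the tx and rx paths back to back; when the backend
 * advertises feature-split-event-channels, each path instead gets its own
 * event channel and IRQ (see setup_netfront_split()).
 */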
1290 #ifdef CONFIG_NET_POLL_CONTROLLER
1291 static void xennet_poll_controller(struct net_device *dev)
1292 {
1293 	xennet_interrupt(0, dev);
1294 }
1295 #endif
1296 
1297 static const struct net_device_ops xennet_netdev_ops = {
1298 	.ndo_open            = xennet_open,
1299 	.ndo_uninit          = xennet_uninit,
1300 	.ndo_stop            = xennet_close,
1301 	.ndo_start_xmit      = xennet_start_xmit,
1302 	.ndo_change_mtu	     = xennet_change_mtu,
1303 	.ndo_get_stats64     = xennet_get_stats64,
1304 	.ndo_set_mac_address = eth_mac_addr,
1305 	.ndo_validate_addr   = eth_validate_addr,
1306 	.ndo_fix_features    = xennet_fix_features,
1307 	.ndo_set_features    = xennet_set_features,
1308 #ifdef CONFIG_NET_POLL_CONTROLLER
1309 	.ndo_poll_controller = xennet_poll_controller,
1310 #endif
1311 };
1312 
1313 static struct net_device *xennet_create_dev(struct xenbus_device *dev)
1314 {
1315 	int i, err;
1316 	struct net_device *netdev;
1317 	struct netfront_info *np;
1318 
1319 	netdev = alloc_etherdev(sizeof(struct netfront_info));
1320 	if (!netdev)
1321 		return ERR_PTR(-ENOMEM);
1322 
1323 	np                   = netdev_priv(netdev);
1324 	np->xbdev            = dev;
1325 
1326 	spin_lock_init(&np->tx_lock);
1327 	spin_lock_init(&np->rx_lock);
1328 
1329 	skb_queue_head_init(&np->rx_batch);
1330 	np->rx_target     = RX_DFL_MIN_TARGET;
1331 	np->rx_min_target = RX_DFL_MIN_TARGET;
1332 	np->rx_max_target = RX_MAX_TARGET;
1333 
1334 	init_timer(&np->rx_refill_timer);
1335 	np->rx_refill_timer.data = (unsigned long)netdev;
1336 	np->rx_refill_timer.function = rx_refill_timeout;
1337 
1338 	err = -ENOMEM;
1339 	np->stats = alloc_percpu(struct netfront_stats);
1340 	if (np->stats == NULL)
1341 		goto exit;
1342 
1343 	/* Initialise tx_skbs as a free chain containing every entry. */
1344 	np->tx_skb_freelist = 0;
1345 	for (i = 0; i < NET_TX_RING_SIZE; i++) {
1346 		skb_entry_set_link(&np->tx_skbs[i], i+1);
1347 		np->grant_tx_ref[i] = GRANT_INVALID_REF;
1348 	}
1349 
1350 	/* Clear out rx_skbs */
1351 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
1352 		np->rx_skbs[i] = NULL;
1353 		np->grant_rx_ref[i] = GRANT_INVALID_REF;
1354 	}
1355 
1356 	/* A grant for every tx ring slot */
1357 	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
1358 					  &np->gref_tx_head) < 0) {
1359 		pr_alert("can't alloc tx grant refs\n");
1360 		err = -ENOMEM;
1361 		goto exit_free_stats;
1362 	}
1363 	/* A grant for every rx ring slot */
1364 	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
1365 					  &np->gref_rx_head) < 0) {
1366 		pr_alert("can't alloc rx grant refs\n");
1367 		err = -ENOMEM;
1368 		goto exit_free_tx;
1369 	}
1370 
1371 	netdev->netdev_ops	= &xennet_netdev_ops;
1372 
1373 	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
1374 	netdev->features        = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
1375 				  NETIF_F_GSO_ROBUST;
1376 	netdev->hw_features	= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
1377 
1378 	/*
1379 	 * Assume that all hw features are available for now. This set
1380 	 * will be adjusted by the call to netdev_update_features() in
1381 	 * xennet_connect() which is the earliest point where we can
1382 	 * negotiate with the backend regarding supported features.
1383 	 */
1384 	netdev->features |= netdev->hw_features;
1385 
1386 	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
1387 	SET_NETDEV_DEV(netdev, &dev->dev);
1388 
1389 	netif_set_gso_max_size(netdev, XEN_NETIF_MAX_TX_SIZE - MAX_TCP_HEADER);
1390 
1391 	np->netdev = netdev;
1392 
1393 	netif_carrier_off(netdev);
1394 
1395 	return netdev;
1396 
1397  exit_free_tx:
1398 	gnttab_free_grant_references(np->gref_tx_head);
1399  exit_free_stats:
1400 	free_percpu(np->stats);
1401  exit:
1402 	free_netdev(netdev);
1403 	return ERR_PTR(err);
1404 }
1405 
1406 /**
1407  * Entry point to this code when a new device is created.  Allocate the basic
1408  * structures and the ring buffers for communication with the backend, and
1409  * inform the backend of the appropriate details for those.
1410  */
1411 static int netfront_probe(struct xenbus_device *dev,
1412 			  const struct xenbus_device_id *id)
1413 {
1414 	int err;
1415 	struct net_device *netdev;
1416 	struct netfront_info *info;
1417 
1418 	netdev = xennet_create_dev(dev);
1419 	if (IS_ERR(netdev)) {
1420 		err = PTR_ERR(netdev);
1421 		xenbus_dev_fatal(dev, err, "creating netdev");
1422 		return err;
1423 	}
1424 
1425 	info = netdev_priv(netdev);
1426 	dev_set_drvdata(&dev->dev, info);
1427 
1428 	err = register_netdev(info->netdev);
1429 	if (err) {
1430 		pr_warn("%s: register_netdev err=%d\n", __func__, err);
1431 		goto fail;
1432 	}
1433 
1434 	err = xennet_sysfs_addif(info->netdev);
1435 	if (err) {
1436 		unregister_netdev(info->netdev);
1437 		pr_warn("%s: add sysfs failed err=%d\n", __func__, err);
1438 		goto fail;
1439 	}
1440 
1441 	return 0;
1442 
1443  fail:
1444 	free_netdev(netdev);
1445 	dev_set_drvdata(&dev->dev, NULL);
1446 	return err;
1447 }
1448 
1449 static void xennet_end_access(int ref, void *page)
1450 {
1451 	/* This frees the page as a side-effect */
1452 	if (ref != GRANT_INVALID_REF)
1453 		gnttab_end_foreign_access(ref, 0, (unsigned long)page);
1454 }
1455 
1456 static void xennet_disconnect_backend(struct netfront_info *info)
1457 {
1458 	/* Stop the old interface to prevent errors while we rebuild the state. */
1459 	spin_lock_bh(&info->rx_lock);
1460 	spin_lock_irq(&info->tx_lock);
1461 	netif_carrier_off(info->netdev);
1462 	spin_unlock_irq(&info->tx_lock);
1463 	spin_unlock_bh(&info->rx_lock);
1464 
1465 	if (info->tx_irq && (info->tx_irq == info->rx_irq))
1466 		unbind_from_irqhandler(info->tx_irq, info);
1467 	if (info->tx_irq && (info->tx_irq != info->rx_irq)) {
1468 		unbind_from_irqhandler(info->tx_irq, info);
1469 		unbind_from_irqhandler(info->rx_irq, info);
1470 	}
1471 	info->tx_evtchn = info->rx_evtchn = 0;
1472 	info->tx_irq = info->rx_irq = 0;
1473 
1474 	/* End access and free the pages */
1475 	xennet_end_access(info->tx_ring_ref, info->tx.sring);
1476 	xennet_end_access(info->rx_ring_ref, info->rx.sring);
1477 
1478 	info->tx_ring_ref = GRANT_INVALID_REF;
1479 	info->rx_ring_ref = GRANT_INVALID_REF;
1480 	info->tx.sring = NULL;
1481 	info->rx.sring = NULL;
1482 }
1483 
1484 /**
1485  * We are reconnecting to the backend, due to a suspend/resume, or a backend
1486  * driver restart.  We tear down our netif structure and recreate it, but
1487  * leave the device-layer structures intact so that this is transparent to the
1488  * rest of the kernel.
1489  */
1490 static int netfront_resume(struct xenbus_device *dev)
1491 {
1492 	struct netfront_info *info = dev_get_drvdata(&dev->dev);
1493 
1494 	dev_dbg(&dev->dev, "%s\n", dev->nodename);
1495 
1496 	xennet_disconnect_backend(info);
1497 	return 0;
1498 }
1499 
1500 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
1501 {
1502 	char *s, *e, *macstr;
1503 	int i;
1504 
1505 	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
1506 	if (IS_ERR(macstr))
1507 		return PTR_ERR(macstr);
1508 
1509 	for (i = 0; i < ETH_ALEN; i++) {
1510 		mac[i] = simple_strtoul(s, &e, 16);
1511 		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
1512 			kfree(macstr);
1513 			return -ENOENT;
1514 		}
1515 		s = e+1;
1516 	}
1517 
1518 	kfree(macstr);
1519 	return 0;
1520 }
1521 
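/*
 * Editorial example: for a xenstore "mac" node of "00:16:3e:12:34:56"
 * (00:16:3e is the Xen OUI), the loop above yields
 * mac[] = { 0x00, 0x16, 0x3e, 0x12, 0x34, 0x56 }; a missing separator or
 * a short string fails with -ENOENT.
 */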
1522 static int setup_netfront_single(struct netfront_info *info)
1523 {
1524 	int err;
1525 
1526 	err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
1527 	if (err < 0)
1528 		goto fail;
1529 
1530 	err = bind_evtchn_to_irqhandler(info->tx_evtchn,
1531 					xennet_interrupt,
1532 					0, info->netdev->name, info);
1533 	if (err < 0)
1534 		goto bind_fail;
1535 	info->rx_evtchn = info->tx_evtchn;
1536 	info->rx_irq = info->tx_irq = err;
1537 
1538 	return 0;
1539 
1540 bind_fail:
1541 	xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
1542 	info->tx_evtchn = 0;
1543 fail:
1544 	return err;
1545 }
1546 
1547 static int setup_netfront_split(struct netfront_info *info)
1548 {
1549 	int err;
1550 
1551 	err = xenbus_alloc_evtchn(info->xbdev, &info->tx_evtchn);
1552 	if (err < 0)
1553 		goto fail;
1554 	err = xenbus_alloc_evtchn(info->xbdev, &info->rx_evtchn);
1555 	if (err < 0)
1556 		goto alloc_rx_evtchn_fail;
1557 
1558 	snprintf(info->tx_irq_name, sizeof(info->tx_irq_name),
1559 		 "%s-tx", info->netdev->name);
1560 	err = bind_evtchn_to_irqhandler(info->tx_evtchn,
1561 					xennet_tx_interrupt,
1562 					0, info->tx_irq_name, info);
1563 	if (err < 0)
1564 		goto bind_tx_fail;
1565 	info->tx_irq = err;
1566 
1567 	snprintf(info->rx_irq_name, sizeof(info->rx_irq_name),
1568 		 "%s-rx", info->netdev->name);
1569 	err = bind_evtchn_to_irqhandler(info->rx_evtchn,
1570 					xennet_rx_interrupt,
1571 					0, info->rx_irq_name, info);
1572 	if (err < 0)
1573 		goto bind_rx_fail;
1574 	info->rx_irq = err;
1575 
1576 	return 0;
1577 
1578 bind_rx_fail:
1579 	unbind_from_irqhandler(info->tx_irq, info);
1580 	info->tx_irq = 0;
1581 bind_tx_fail:
1582 	xenbus_free_evtchn(info->xbdev, info->rx_evtchn);
1583 	info->rx_evtchn = 0;
1584 alloc_rx_evtchn_fail:
1585 	xenbus_free_evtchn(info->xbdev, info->tx_evtchn);
1586 	info->tx_evtchn = 0;
1587 fail:
1588 	return err;
1589 }
1590 
1591 static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
1592 {
1593 	struct xen_netif_tx_sring *txs;
1594 	struct xen_netif_rx_sring *rxs;
1595 	int err;
1596 	struct net_device *netdev = info->netdev;
1597 	unsigned int feature_split_evtchn;
1598 
1599 	info->tx_ring_ref = GRANT_INVALID_REF;
1600 	info->rx_ring_ref = GRANT_INVALID_REF;
1601 	info->rx.sring = NULL;
1602 	info->tx.sring = NULL;
1603 	netdev->irq = 0;
1604 
1605 	err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
1606 			   "feature-split-event-channels", "%u",
1607 			   &feature_split_evtchn);
1608 	if (err < 0)
1609 		feature_split_evtchn = 0;
1610 
1611 	err = xen_net_read_mac(dev, netdev->dev_addr);
1612 	if (err) {
1613 		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
1614 		goto fail;
1615 	}
1616 
1617 	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1618 	if (!txs) {
1619 		err = -ENOMEM;
1620 		xenbus_dev_fatal(dev, err, "allocating tx ring page");
1621 		goto fail;
1622 	}
1623 	SHARED_RING_INIT(txs);
1624 	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
1625 
1626 	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
1627 	if (err < 0)
1628 		goto grant_tx_ring_fail;
1629 
1630 	info->tx_ring_ref = err;
1631 	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
1632 	if (!rxs) {
1633 		err = -ENOMEM;
1634 		xenbus_dev_fatal(dev, err, "allocating rx ring page");
1635 		goto alloc_rx_ring_fail;
1636 	}
1637 	SHARED_RING_INIT(rxs);
1638 	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
1639 
1640 	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
1641 	if (err < 0)
1642 		goto grant_rx_ring_fail;
1643 	info->rx_ring_ref = err;
1644 
1645 	if (feature_split_evtchn)
1646 		err = setup_netfront_split(info);
1647 	/* Set up a single event channel if
1648 	 *  a) feature-split-event-channels == 0, or
1649 	 *  b) feature-split-event-channels == 1 but split setup failed.
1650 	 */
1651 	if (!feature_split_evtchn || (feature_split_evtchn && err))
1652 		err = setup_netfront_single(info);
1653 
1654 	if (err)
1655 		goto alloc_evtchn_fail;
1656 
1657 	return 0;
1658 
1659 	/* If we fail to setup netfront, it is safe to just revoke access to
1660 	 * granted pages because backend is not accessing it at this point.
1661 	 */
1662 alloc_evtchn_fail:
1663 	gnttab_end_foreign_access_ref(info->rx_ring_ref, 0);
1664 grant_rx_ring_fail:
1665 	free_page((unsigned long)rxs);
1666 alloc_rx_ring_fail:
1667 	gnttab_end_foreign_access_ref(info->tx_ring_ref, 0);
1668 grant_tx_ring_fail:
1669 	free_page((unsigned long)txs);
1670 fail:
1671 	return err;
1672 }
1673 
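/*
 * Editorial sketch of the generic frontend ring bring-up performed above
 * (macros from <xen/interface/io/ring.h>; "sring", "front" and "ref" are
 * placeholder names):
 *
 *	struct xen_netif_tx_sring *sring =
 *		(void *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
 *	struct xen_netif_tx_front_ring front;
 *
 *	SHARED_RING_INIT(sring);	// shared producer/consumer indices
 *	FRONT_RING_INIT(&front, sring, PAGE_SIZE); // private cursors
 *	int ref = xenbus_grant_ring(dev, virt_to_mfn(sring));
 *	// ref (if >= 0) is then published to xenstore for the backend
 */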
1674 /* Common code used when first setting up, and when resuming. */
1675 static int talk_to_netback(struct xenbus_device *dev,
1676 			   struct netfront_info *info)
1677 {
1678 	const char *message;
1679 	struct xenbus_transaction xbt;
1680 	int err;
1681 
1682 	/* Create shared ring, alloc event channel. */
1683 	err = setup_netfront(dev, info);
1684 	if (err)
1685 		goto out;
1686 
1687 again:
1688 	err = xenbus_transaction_start(&xbt);
1689 	if (err) {
1690 		xenbus_dev_fatal(dev, err, "starting transaction");
1691 		goto destroy_ring;
1692 	}
1693 
1694 	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
1695 			    info->tx_ring_ref);
1696 	if (err) {
1697 		message = "writing tx ring-ref";
1698 		goto abort_transaction;
1699 	}
1700 	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
1701 			    info->rx_ring_ref);
1702 	if (err) {
1703 		message = "writing rx ring-ref";
1704 		goto abort_transaction;
1705 	}
1706 
1707 	if (info->tx_evtchn == info->rx_evtchn) {
1708 		err = xenbus_printf(xbt, dev->nodename,
1709 				    "event-channel", "%u", info->tx_evtchn);
1710 		if (err) {
1711 			message = "writing event-channel";
1712 			goto abort_transaction;
1713 		}
1714 	} else {
1715 		err = xenbus_printf(xbt, dev->nodename,
1716 				    "event-channel-tx", "%u", info->tx_evtchn);
1717 		if (err) {
1718 			message = "writing event-channel-tx";
1719 			goto abort_transaction;
1720 		}
1721 		err = xenbus_printf(xbt, dev->nodename,
1722 				    "event-channel-rx", "%u", info->rx_evtchn);
1723 		if (err) {
1724 			message = "writing event-channel-rx";
1725 			goto abort_transaction;
1726 		}
1727 	}
1728 
1729 	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
1730 			    1);
1731 	if (err) {
1732 		message = "writing request-rx-copy";
1733 		goto abort_transaction;
1734 	}
1735 
1736 	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
1737 	if (err) {
1738 		message = "writing feature-rx-notify";
1739 		goto abort_transaction;
1740 	}
1741 
1742 	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
1743 	if (err) {
1744 		message = "writing feature-sg";
1745 		goto abort_transaction;
1746 	}
1747 
1748 	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
1749 	if (err) {
1750 		message = "writing feature-gso-tcpv4";
1751 		goto abort_transaction;
1752 	}
1753 
1754 	err = xenbus_transaction_end(xbt, 0);
1755 	if (err) {
1756 		if (err == -EAGAIN)
1757 			goto again;
1758 		xenbus_dev_fatal(dev, err, "completing transaction");
1759 		goto destroy_ring;
1760 	}
1761 
1762 	return 0;
1763 
1764  abort_transaction:
1765 	xenbus_transaction_end(xbt, 1);
1766 	xenbus_dev_fatal(dev, err, "%s", message);
1767  destroy_ring:
1768 	xennet_disconnect_backend(info);
1769  out:
1770 	return err;
1771 }
1772 
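/*
 * Editorial note on the transaction pattern above: xenstore transactions
 * are optimistic, so xenbus_transaction_end(xbt, 0) can return -EAGAIN
 * when another writer raced us, in which case the whole
 * start/printf/.../end sequence is replayed from the "again:" label; any
 * other failure aborts via xenbus_transaction_end(xbt, 1).
 */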
1773 static int xennet_connect(struct net_device *dev)
1774 {
1775 	struct netfront_info *np = netdev_priv(dev);
1776 	int i, requeue_idx, err;
1777 	struct sk_buff *skb;
1778 	grant_ref_t ref;
1779 	struct xen_netif_rx_request *req;
1780 	unsigned int feature_rx_copy;
1781 
1782 	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1783 			   "feature-rx-copy", "%u", &feature_rx_copy);
1784 	if (err != 1)
1785 		feature_rx_copy = 0;
1786 
1787 	if (!feature_rx_copy) {
1788 		dev_info(&dev->dev,
1789 			 "backend does not support copying receive path\n");
1790 		return -ENODEV;
1791 	}
1792 
1793 	err = talk_to_netback(np->xbdev, np);
1794 	if (err)
1795 		return err;
1796 
1797 	rtnl_lock();
1798 	netdev_update_features(dev);
1799 	rtnl_unlock();
1800 
1801 	spin_lock_bh(&np->rx_lock);
1802 	spin_lock_irq(&np->tx_lock);
1803 
1804 	/* Step 1: Discard all pending TX packet fragments. */
1805 	xennet_release_tx_bufs(np);
1806 
1807 	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1808 	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1809 		skb_frag_t *frag;
1810 		const struct page *page;
1811 		if (!np->rx_skbs[i])
1812 			continue;
1813 
1814 		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
1815 		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
1816 		req = RING_GET_REQUEST(&np->rx, requeue_idx);
1817 
1818 		frag = &skb_shinfo(skb)->frags[0];
1819 		page = skb_frag_page(frag);
1820 		gnttab_grant_foreign_access_ref(
1821 			ref, np->xbdev->otherend_id,
1822 			pfn_to_mfn(page_to_pfn(page)),
1823 			0);
1824 		req->gref = ref;
1825 		req->id   = requeue_idx;
1826 
1827 		requeue_idx++;
1828 	}
1829 
1830 	np->rx.req_prod_pvt = requeue_idx;
1831 
1832 	/*
1833 	 * Step 3: All public and private state should now be sane.  Get
1834 	 * ready to start sending and receiving packets and give the driver
1835 	 * domain a kick because we've probably just requeued some
1836 	 * packets.
1837 	 */
1838 	netif_carrier_on(np->netdev);
1839 	notify_remote_via_irq(np->tx_irq);
1840 	if (np->tx_irq != np->rx_irq)
1841 		notify_remote_via_irq(np->rx_irq);
1842 	xennet_tx_buf_gc(dev);
1843 	xennet_alloc_rx_buffers(dev);
1844 
1845 	spin_unlock_irq(&np->tx_lock);
1846 	spin_unlock_bh(&np->rx_lock);
1847 
1848 	return 0;
1849 }
1850 
1851 /**
1852  * Callback received when the backend's state changes.
1853  */
1854 static void netback_changed(struct xenbus_device *dev,
1855 			    enum xenbus_state backend_state)
1856 {
1857 	struct netfront_info *np = dev_get_drvdata(&dev->dev);
1858 	struct net_device *netdev = np->netdev;
1859 
1860 	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
1861 
1862 	switch (backend_state) {
1863 	case XenbusStateInitialising:
1864 	case XenbusStateInitialised:
1865 	case XenbusStateReconfiguring:
1866 	case XenbusStateReconfigured:
1867 	case XenbusStateUnknown:
1868 	case XenbusStateClosed:
1869 		break;
1870 
1871 	case XenbusStateInitWait:
1872 		if (dev->state != XenbusStateInitialising)
1873 			break;
1874 		if (xennet_connect(netdev) != 0)
1875 			break;
1876 		xenbus_switch_state(dev, XenbusStateConnected);
1877 		break;
1878 
1879 	case XenbusStateConnected:
1880 		netdev_notify_peers(netdev);
1881 		break;
1882 
1883 	case XenbusStateClosing:
1884 		xenbus_frontend_closed(dev);
1885 		break;
1886 	}
1887 }
1888 
1889 static const struct xennet_stat {
1890 	char name[ETH_GSTRING_LEN];
1891 	u16 offset;
1892 } xennet_stats[] = {
1893 	{
1894 		"rx_gso_checksum_fixup",
1895 		offsetof(struct netfront_info, rx_gso_checksum_fixup)
1896 	},
1897 };
1898 
1899 static int xennet_get_sset_count(struct net_device *dev, int string_set)
1900 {
1901 	switch (string_set) {
1902 	case ETH_SS_STATS:
1903 		return ARRAY_SIZE(xennet_stats);
1904 	default:
1905 		return -EINVAL;
1906 	}
1907 }
1908 
1909 static void xennet_get_ethtool_stats(struct net_device *dev,
1910 				     struct ethtool_stats *stats, u64 *data)
1911 {
1912 	void *np = netdev_priv(dev);
1913 	int i;
1914 
1915 	for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1916 		data[i] = *(unsigned long *)(np + xennet_stats[i].offset);
1917 }
1918 
1919 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1920 {
1921 	int i;
1922 
1923 	switch (stringset) {
1924 	case ETH_SS_STATS:
1925 		for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1926 			memcpy(data + i * ETH_GSTRING_LEN,
1927 			       xennet_stats[i].name, ETH_GSTRING_LEN);
1928 		break;
1929 	}
1930 }
1931 
1932 static const struct ethtool_ops xennet_ethtool_ops =
1933 {
1934 	.get_link = ethtool_op_get_link,
1935 
1936 	.get_sset_count = xennet_get_sset_count,
1937 	.get_ethtool_stats = xennet_get_ethtool_stats,
1938 	.get_strings = xennet_get_strings,
1939 };
1940 
1941 #ifdef CONFIG_SYSFS
1942 static ssize_t show_rxbuf_min(struct device *dev,
1943 			      struct device_attribute *attr, char *buf)
1944 {
1945 	struct net_device *netdev = to_net_dev(dev);
1946 	struct netfront_info *info = netdev_priv(netdev);
1947 
1948 	return sprintf(buf, "%u\n", info->rx_min_target);
1949 }
1950 
1951 static ssize_t store_rxbuf_min(struct device *dev,
1952 			       struct device_attribute *attr,
1953 			       const char *buf, size_t len)
1954 {
1955 	struct net_device *netdev = to_net_dev(dev);
1956 	struct netfront_info *np = netdev_priv(netdev);
1957 	char *endp;
1958 	unsigned long target;
1959 
1960 	if (!capable(CAP_NET_ADMIN))
1961 		return -EPERM;
1962 
1963 	target = simple_strtoul(buf, &endp, 0);
1964 	if (endp == buf)
1965 		return -EBADMSG;
1966 
1967 	if (target < RX_MIN_TARGET)
1968 		target = RX_MIN_TARGET;
1969 	if (target > RX_MAX_TARGET)
1970 		target = RX_MAX_TARGET;
1971 
1972 	spin_lock_bh(&np->rx_lock);
1973 	if (target > np->rx_max_target)
1974 		np->rx_max_target = target;
1975 	np->rx_min_target = target;
1976 	if (target > np->rx_target)
1977 		np->rx_target = target;
1978 
1979 	xennet_alloc_rx_buffers(netdev);
1980 
1981 	spin_unlock_bh(&np->rx_lock);
1982 	return len;
1983 }
1984 
1985 static ssize_t show_rxbuf_max(struct device *dev,
1986 			      struct device_attribute *attr, char *buf)
1987 {
1988 	struct net_device *netdev = to_net_dev(dev);
1989 	struct netfront_info *info = netdev_priv(netdev);
1990 
1991 	return sprintf(buf, "%u\n", info->rx_max_target);
1992 }
1993 
1994 static ssize_t store_rxbuf_max(struct device *dev,
1995 			       struct device_attribute *attr,
1996 			       const char *buf, size_t len)
1997 {
1998 	struct net_device *netdev = to_net_dev(dev);
1999 	struct netfront_info *np = netdev_priv(netdev);
2000 	char *endp;
2001 	unsigned long target;
2002 
2003 	if (!capable(CAP_NET_ADMIN))
2004 		return -EPERM;
2005 
2006 	target = simple_strtoul(buf, &endp, 0);
2007 	if (endp == buf)
2008 		return -EBADMSG;
2009 
2010 	if (target < RX_MIN_TARGET)
2011 		target = RX_MIN_TARGET;
2012 	if (target > RX_MAX_TARGET)
2013 		target = RX_MAX_TARGET;
2014 
2015 	spin_lock_bh(&np->rx_lock);
2016 	if (target < np->rx_min_target)
2017 		np->rx_min_target = target;
2018 	np->rx_max_target = target;
2019 	if (target < np->rx_target)
2020 		np->rx_target = target;
2021 
2022 	xennet_alloc_rx_buffers(netdev);
2023 
2024 	spin_unlock_bh(&np->rx_lock);
2025 	return len;
2026 }
2027 
2028 static ssize_t show_rxbuf_cur(struct device *dev,
2029 			      struct device_attribute *attr, char *buf)
2030 {
2031 	struct net_device *netdev = to_net_dev(dev);
2032 	struct netfront_info *info = netdev_priv(netdev);
2033 
2034 	return sprintf(buf, "%u\n", info->rx_target);
2035 }
2036 
2037 static struct device_attribute xennet_attrs[] = {
2038 	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
2039 	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
2040 	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
2041 };
2042 
2043 static int xennet_sysfs_addif(struct net_device *netdev)
2044 {
2045 	int i;
2046 	int err;
2047 
2048 	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
2049 		err = device_create_file(&netdev->dev,
2050 					   &xennet_attrs[i]);
2051 		if (err)
2052 			goto fail;
2053 	}
2054 	return 0;
2055 
2056  fail:
2057 	while (--i >= 0)
2058 		device_remove_file(&netdev->dev, &xennet_attrs[i]);
2059 	return err;
2060 }
2061 
2062 static void xennet_sysfs_delif(struct net_device *netdev)
2063 {
2064 	int i;
2065 
2066 	for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
2067 		device_remove_file(&netdev->dev, &xennet_attrs[i]);
2068 }
2069 
2070 #endif /* CONFIG_SYSFS */
2071 
2072 static const struct xenbus_device_id netfront_ids[] = {
2073 	{ "vif" },
2074 	{ "" }
2075 };
2076 
2077 
2078 static int xennet_remove(struct xenbus_device *dev)
2079 {
2080 	struct netfront_info *info = dev_get_drvdata(&dev->dev);
2081 
2082 	dev_dbg(&dev->dev, "%s\n", dev->nodename);
2083 
2084 	xennet_disconnect_backend(info);
2085 
2086 	xennet_sysfs_delif(info->netdev);
2087 
2088 	unregister_netdev(info->netdev);
2089 
2090 	del_timer_sync(&info->rx_refill_timer);
2091 
2092 	free_percpu(info->stats);
2093 
2094 	free_netdev(info->netdev);
2095 
2096 	return 0;
2097 }
2098 
2099 static DEFINE_XENBUS_DRIVER(netfront, ,
2100 	.probe = netfront_probe,
2101 	.remove = xennet_remove,
2102 	.resume = netfront_resume,
2103 	.otherend_changed = netback_changed,
2104 );
2105 
2106 static int __init netif_init(void)
2107 {
2108 	if (!xen_domain())
2109 		return -ENODEV;
2110 
2111 	if (xen_hvm_domain() && !xen_platform_pci_unplug)
2112 		return -ENODEV;
2113 
2114 	pr_info("Initialising Xen virtual ethernet driver\n");
2115 
2116 	return xenbus_register_frontend(&netfront_driver);
2117 }
2118 module_init(netif_init);
2119 
2120 
2121 static void __exit netif_exit(void)
2122 {
2123 	xenbus_unregister_driver(&netfront_driver);
2124 }
2125 module_exit(netif_exit);
2126 
2127 MODULE_DESCRIPTION("Xen virtual network device frontend");
2128 MODULE_LICENSE("GPL");
2129 MODULE_ALIAS("xen:vif");
2130 MODULE_ALIAS("xennet");
2131