xref: /linux/drivers/net/xen-netback/netback.c (revision 957e3facd147510f2cf8780e38606f1d707f0e33)
1 /*
2  * Back-end of the driver for virtual network devices. This portion of the
3  * driver exports a 'unified' network-device interface that can be accessed
4  * by any operating system that implements a compatible front end. A
5  * reference front-end implementation can be found in:
6  *  drivers/net/xen-netfront.c
7  *
8  * Copyright (c) 2002-2005, K A Fraser
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License version 2
12  * as published by the Free Software Foundation; or, when distributed
13  * separately from the Linux kernel or incorporated into other
14  * software packages, subject to the following license:
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a copy
17  * of this source file (the "Software"), to deal in the Software without
18  * restriction, including without limitation the rights to use, copy, modify,
19  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20  * and to permit persons to whom the Software is furnished to do so, subject to
21  * the following conditions:
22  *
23  * The above copyright notice and this permission notice shall be included in
24  * all copies or substantial portions of the Software.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32  * IN THE SOFTWARE.
33  */
34 
35 #include "common.h"
36 
37 #include <linux/kthread.h>
38 #include <linux/if_vlan.h>
39 #include <linux/udp.h>
40 #include <linux/highmem.h>
41 
42 #include <net/tcp.h>
43 
44 #include <xen/xen.h>
45 #include <xen/events.h>
46 #include <xen/interface/memory.h>
47 
48 #include <asm/xen/hypercall.h>
49 #include <asm/xen/page.h>
50 
51 /* Provide an option to disable split event channels at load time as
52  * event channels are a limited resource. Split event channels are
53  * enabled by default.
54  */
55 bool separate_tx_rx_irq = true;
56 module_param(separate_tx_rx_irq, bool, 0644);
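/* For example, split event channels can typically be disabled at module load
 * time with "modprobe xen-netback separate_tx_rx_irq=0"; with the 0644
 * permissions above, the current value is also visible under
 * /sys/module/xen_netback/parameters/separate_tx_rx_irq.
 */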
57 
58 /* The time that packets can stay on the guest Rx internal queue
59  * before they are dropped.
60  */
61 unsigned int rx_drain_timeout_msecs = 10000;
62 module_param(rx_drain_timeout_msecs, uint, 0444);
63 unsigned int rx_drain_timeout_jiffies;
64 
65 /* The length of time before the frontend is considered unresponsive
66  * because it isn't providing Rx slots.
67  */
68 static unsigned int rx_stall_timeout_msecs = 60000;
69 module_param(rx_stall_timeout_msecs, uint, 0444);
70 static unsigned int rx_stall_timeout_jiffies;
71 
72 unsigned int xenvif_max_queues;
73 module_param_named(max_queues, xenvif_max_queues, uint, 0644);
74 MODULE_PARM_DESC(max_queues,
75 		 "Maximum number of queues per virtual interface");
76 
77 /*
78  * This is the maximum number of slots a skb can use. If a guest sends a skb
79  * which exceeds this limit, it is considered malicious.
80  */
81 #define FATAL_SKB_SLOTS_DEFAULT 20
82 static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
83 module_param(fatal_skb_slots, uint, 0444);
84 
85 /* The amount to copy out of the first guest Tx slot into the skb's
86  * linear area.  If the first slot has more data, it will be mapped
87  * and put into the first frag.
88  *
89  * This is sized to avoid pulling headers from the frags for most
90  * TCP/IP packets.
91  */
92 #define XEN_NETBACK_TX_COPY_LEN 128
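/* Illustrative example: for a 1514-byte single-slot packet, the first 128
 * bytes (enough for typical Ethernet + IP + TCP headers) are grant-copied
 * into the skb's linear area, and the remaining 1386 bytes are grant-mapped
 * and attached as the first frag (see xenvif_tx_build_gops() and
 * xenvif_tx_submit()).
 */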
93 
94 
95 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
96 			       u8 status);
97 
98 static void make_tx_response(struct xenvif_queue *queue,
99 			     struct xen_netif_tx_request *txp,
100 			     s8       st);
101 
102 static inline int tx_work_todo(struct xenvif_queue *queue);
103 
104 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
105 					     u16      id,
106 					     s8       st,
107 					     u16      offset,
108 					     u16      size,
109 					     u16      flags);
110 
111 static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
112 				       u16 idx)
113 {
114 	return page_to_pfn(queue->mmap_pages[idx]);
115 }
116 
117 static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
118 					 u16 idx)
119 {
120 	return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
121 }
122 
123 #define callback_param(vif, pending_idx) \
124 	(vif->pending_tx_info[pending_idx].callback_struct)
125 
126 /* Find the containing queue structure from a pointer into its pending_tx_info array
127  */
128 static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info *ubuf)
129 {
130 	u16 pending_idx = ubuf->desc;
131 	struct pending_tx_info *temp =
132 		container_of(ubuf, struct pending_tx_info, callback_struct);
133 	return container_of(temp - pending_idx,
134 			    struct xenvif_queue,
135 			    pending_tx_info[0]);
136 }
137 
138 static u16 frag_get_pending_idx(skb_frag_t *frag)
139 {
140 	return (u16)frag->page_offset;
141 }
142 
143 static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
144 {
145 	frag->page_offset = pending_idx;
146 }
147 
148 static inline pending_ring_idx_t pending_index(unsigned i)
149 {
150 	return i & (MAX_PENDING_REQS-1);
151 }
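/* MAX_PENDING_REQS is a power of two, so the mask above is a cheap modulo;
 * e.g. assuming MAX_PENDING_REQS == 256, pending_index(257) == 1 and
 * pending_index(255) == 255.
 */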
152 
153 bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
154 {
155 	RING_IDX prod, cons;
156 
157 	do {
158 		prod = queue->rx.sring->req_prod;
159 		cons = queue->rx.req_cons;
160 
161 		if (prod - cons >= needed)
162 			return true;
163 
164 		queue->rx.sring->req_event = prod + 1;
165 
166 		/* Make sure event is visible before we check prod
167 		 * again.
168 		 */
169 		mb();
170 	} while (queue->rx.sring->req_prod != prod);
171 
172 	return false;
173 }
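/* When no room is found, req_event has been re-armed above, so the next
 * request posted by the frontend raises an event; callers such as
 * xenvif_rx_action() simply stop dequeuing until the queue is kicked again.
 */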
174 
175 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
176 {
177 	unsigned long flags;
178 
179 	spin_lock_irqsave(&queue->rx_queue.lock, flags);
180 
181 	__skb_queue_tail(&queue->rx_queue, skb);
182 
183 	queue->rx_queue_len += skb->len;
184 	if (queue->rx_queue_len > queue->rx_queue_max)
185 		netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
186 
187 	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
188 }
189 
190 static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
191 {
192 	struct sk_buff *skb;
193 
194 	spin_lock_irq(&queue->rx_queue.lock);
195 
196 	skb = __skb_dequeue(&queue->rx_queue);
197 	if (skb)
198 		queue->rx_queue_len -= skb->len;
199 
200 	spin_unlock_irq(&queue->rx_queue.lock);
201 
202 	return skb;
203 }
204 
205 static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
206 {
207 	spin_lock_irq(&queue->rx_queue.lock);
208 
209 	if (queue->rx_queue_len < queue->rx_queue_max)
210 		netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
211 
212 	spin_unlock_irq(&queue->rx_queue.lock);
213 }
214 
215 
216 static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
217 {
218 	struct sk_buff *skb;
219 	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
220 		kfree_skb(skb);
221 }
222 
223 static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
224 {
225 	struct sk_buff *skb;
226 
227 	for (;;) {
228 		skb = skb_peek(&queue->rx_queue);
229 		if (!skb)
230 			break;
231 		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
232 			break;
233 		xenvif_rx_dequeue(queue);
234 		kfree_skb(skb);
235 	}
236 }
237 
238 /*
239  * Returns true if we should start a new receive buffer instead of
240  * adding 'size' bytes to a buffer which currently contains 'offset'
241  * bytes.
242  */
243 static bool start_new_rx_buffer(int offset, unsigned long size, int head,
244 				bool full_coalesce)
245 {
246 	/* simple case: we have completely filled the current buffer. */
247 	if (offset == MAX_BUFFER_OFFSET)
248 		return true;
249 
250 	/*
251 	 * complex case: start a fresh buffer if the current frag
252 	 * would overflow the current buffer but only if:
253 	 *     (i)   this frag would fit completely in the next buffer
254 	 * and (ii)  there is already some data in the current buffer
255 	 * and (iii) this is not the head buffer.
256 	 * and (iv)  there is no need to fully utilize the buffers
257 	 *
258 	 * Where:
259 	 * - (i) stops us splitting a frag into two copies
260 	 *   unless the frag is too large for a single buffer.
261 	 * - (ii) stops us from leaving a buffer pointlessly empty.
262 	 * - (iii) stops us leaving the first buffer
263 	 *   empty. Strictly speaking this is already covered
264 	 *   by (ii) but is explicitly checked because
265 	 *   netfront relies on the first buffer being
266 	 *   non-empty and can crash otherwise.
267 	 * - (iv) is needed for skbs which can use up more than MAX_SKB_FRAGS
268 	 *   slots.
269 	 *
270 	 * This means we will effectively linearise small
271 	 * frags but will not needlessly split large buffers
272 	 * into multiple copies; we tend to give large frags their
273 	 * own buffers as before.
274 	 */
275 	BUG_ON(size > MAX_BUFFER_OFFSET);
276 	if ((offset + size > MAX_BUFFER_OFFSET) && offset && !head &&
277 	    !full_coalesce)
278 		return true;
279 
280 	return false;
281 }
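/* Illustrative example, assuming MAX_BUFFER_OFFSET == PAGE_SIZE == 4096:
 * a 1000-byte chunk arriving when 'offset' (copy_off) is 3500 starts a new
 * buffer (3500 + 1000 > 4096, non-head, not full_coalesce). With
 * full_coalesce set, the caller instead clamps the copy to the 596 bytes
 * left in the current buffer and splits the chunk across two grant copies.
 */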
282 
283 struct netrx_pending_operations {
284 	unsigned copy_prod, copy_cons;
285 	unsigned meta_prod, meta_cons;
286 	struct gnttab_copy *copy;
287 	struct xenvif_rx_meta *meta;
288 	int copy_off;
289 	grant_ref_t copy_gref;
290 };
291 
292 static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
293 						 struct netrx_pending_operations *npo)
294 {
295 	struct xenvif_rx_meta *meta;
296 	struct xen_netif_rx_request *req;
297 
298 	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
299 
300 	meta = npo->meta + npo->meta_prod++;
301 	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
302 	meta->gso_size = 0;
303 	meta->size = 0;
304 	meta->id = req->id;
305 
306 	npo->copy_off = 0;
307 	npo->copy_gref = req->gref;
308 
309 	return meta;
310 }
311 
312 /*
313  * Set up the grant operations for this fragment. If it's a flipping
314  * interface, we also set up the unmap request from here.
315  */
316 static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb,
317 				 struct netrx_pending_operations *npo,
318 				 struct page *page, unsigned long size,
319 				 unsigned long offset, int *head,
320 				 struct xenvif_queue *foreign_queue,
321 				 grant_ref_t foreign_gref)
322 {
323 	struct gnttab_copy *copy_gop;
324 	struct xenvif_rx_meta *meta;
325 	unsigned long bytes;
326 	int gso_type = XEN_NETIF_GSO_TYPE_NONE;
327 
328 	/* Data must not cross a page boundary. */
329 	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
330 
331 	meta = npo->meta + npo->meta_prod - 1;
332 
333 	/* Skip unused page frames at the start of the (possibly compound) page */
334 	page += offset >> PAGE_SHIFT;
335 	offset &= ~PAGE_MASK;
336 
337 	while (size > 0) {
338 		BUG_ON(offset >= PAGE_SIZE);
339 		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
340 
341 		bytes = PAGE_SIZE - offset;
342 
343 		if (bytes > size)
344 			bytes = size;
345 
346 		if (start_new_rx_buffer(npo->copy_off,
347 					bytes,
348 					*head,
349 					XENVIF_RX_CB(skb)->full_coalesce)) {
350 			/*
351 			 * Netfront requires there to be some data in the head
352 			 * buffer.
353 			 */
354 			BUG_ON(*head);
355 
356 			meta = get_next_rx_buffer(queue, npo);
357 		}
358 
359 		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
360 			bytes = MAX_BUFFER_OFFSET - npo->copy_off;
361 
362 		copy_gop = npo->copy + npo->copy_prod++;
363 		copy_gop->flags = GNTCOPY_dest_gref;
364 		copy_gop->len = bytes;
365 
366 		if (foreign_queue) {
367 			copy_gop->source.domid = foreign_queue->vif->domid;
368 			copy_gop->source.u.ref = foreign_gref;
369 			copy_gop->flags |= GNTCOPY_source_gref;
370 		} else {
371 			copy_gop->source.domid = DOMID_SELF;
372 			copy_gop->source.u.gmfn =
373 				virt_to_mfn(page_address(page));
374 		}
375 		copy_gop->source.offset = offset;
376 
377 		copy_gop->dest.domid = queue->vif->domid;
378 		copy_gop->dest.offset = npo->copy_off;
379 		copy_gop->dest.u.ref = npo->copy_gref;
380 
381 		npo->copy_off += bytes;
382 		meta->size += bytes;
383 
384 		offset += bytes;
385 		size -= bytes;
386 
387 		/* Next frame */
388 		if (offset == PAGE_SIZE && size) {
389 			BUG_ON(!PageCompound(page));
390 			page++;
391 			offset = 0;
392 		}
393 
394 		/* Leave a gap for the GSO descriptor. */
395 		if (skb_is_gso(skb)) {
396 			if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
397 				gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
398 			else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
399 				gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
400 		}
401 
402 		if (*head && ((1 << gso_type) & queue->vif->gso_mask))
403 			queue->rx.req_cons++;
404 
405 		*head = 0; /* There must be something in this buffer now. */
406 
407 	}
408 }
409 
410 /*
411  * Find the grant ref for a given frag in a chain of struct ubuf_info's
412  * skb: the skb itself
413  * i: the frag's number
414  * ubuf: a pointer to an element in the chain. It should not be NULL
415  *
416  * Returns a pointer to the element in the chain where the page was found. If
417  * not found, returns NULL.
418  * See the definition of callback_struct in common.h for more details about
419  * the chain.
420  */
421 static const struct ubuf_info *xenvif_find_gref(const struct sk_buff *const skb,
422 						const int i,
423 						const struct ubuf_info *ubuf)
424 {
425 	struct xenvif_queue *foreign_queue = ubuf_to_queue(ubuf);
426 
427 	do {
428 		u16 pending_idx = ubuf->desc;
429 
430 		if (skb_shinfo(skb)->frags[i].page.p ==
431 		    foreign_queue->mmap_pages[pending_idx])
432 			break;
433 		ubuf = (struct ubuf_info *) ubuf->ctx;
434 	} while (ubuf);
435 
436 	return ubuf;
437 }
438 
439 /*
440  * Prepare an SKB to be transmitted to the frontend.
441  *
442  * This function is responsible for allocating grant operations, meta
443  * structures, etc.
444  *
445  * It returns the number of meta structures consumed. The number of
446  * ring slots used is always equal to the number of meta slots used
447  * plus the number of GSO descriptors used. Currently, we use either
448  * zero GSO descriptors (for non-GSO packets) or one descriptor (for
449  * frontend-side LRO).
450  * frontend-side LRO). */
451 static int xenvif_gop_skb(struct sk_buff *skb,
452 			  struct netrx_pending_operations *npo,
453 			  struct xenvif_queue *queue)
454 {
455 	struct xenvif *vif = netdev_priv(skb->dev);
456 	int nr_frags = skb_shinfo(skb)->nr_frags;
457 	int i;
458 	struct xen_netif_rx_request *req;
459 	struct xenvif_rx_meta *meta;
460 	unsigned char *data;
461 	int head = 1;
462 	int old_meta_prod;
463 	int gso_type;
464 	const struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg;
465 	const struct ubuf_info *const head_ubuf = ubuf;
466 
467 	old_meta_prod = npo->meta_prod;
468 
469 	gso_type = XEN_NETIF_GSO_TYPE_NONE;
470 	if (skb_is_gso(skb)) {
471 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
472 			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
473 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
474 			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
475 	}
476 
477 	/* Set up a GSO prefix descriptor, if necessary */
478 	if ((1 << gso_type) & vif->gso_prefix_mask) {
479 		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
480 		meta = npo->meta + npo->meta_prod++;
481 		meta->gso_type = gso_type;
482 		meta->gso_size = skb_shinfo(skb)->gso_size;
483 		meta->size = 0;
484 		meta->id = req->id;
485 	}
486 
487 	req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons++);
488 	meta = npo->meta + npo->meta_prod++;
489 
490 	if ((1 << gso_type) & vif->gso_mask) {
491 		meta->gso_type = gso_type;
492 		meta->gso_size = skb_shinfo(skb)->gso_size;
493 	} else {
494 		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
495 		meta->gso_size = 0;
496 	}
497 
498 	meta->size = 0;
499 	meta->id = req->id;
500 	npo->copy_off = 0;
501 	npo->copy_gref = req->gref;
502 
503 	data = skb->data;
504 	while (data < skb_tail_pointer(skb)) {
505 		unsigned int offset = offset_in_page(data);
506 		unsigned int len = PAGE_SIZE - offset;
507 
508 		if (data + len > skb_tail_pointer(skb))
509 			len = skb_tail_pointer(skb) - data;
510 
511 		xenvif_gop_frag_copy(queue, skb, npo,
512 				     virt_to_page(data), len, offset, &head,
513 				     NULL,
514 				     0);
515 		data += len;
516 	}
517 
518 	for (i = 0; i < nr_frags; i++) {
519 		/* This variable also signals whether foreign_gref has a real
520 		 * value or not.
521 		 */
522 		struct xenvif_queue *foreign_queue = NULL;
523 		grant_ref_t foreign_gref;
524 
525 		if ((skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) &&
526 			(ubuf->callback == &xenvif_zerocopy_callback)) {
527 			const struct ubuf_info *const startpoint = ubuf;
528 
529 			/* Ideally ubuf points to the chain element which
530 			 * belongs to this frag. Or if frags were removed from
531 			 * the beginning, then shortly before it.
532 			 */
533 			ubuf = xenvif_find_gref(skb, i, ubuf);
534 
535 			/* Try again from the beginning of the list, if we
536 			 * haven't tried from there. This only makes sense in
537 			 * the unlikely event of reordering the original frags.
538 			 * For injected local pages it's an unnecessary second
539 			 * run.
540 			 */
541 			if (unlikely(!ubuf) && startpoint != head_ubuf)
542 				ubuf = xenvif_find_gref(skb, i, head_ubuf);
543 
544 			if (likely(ubuf)) {
545 				u16 pending_idx = ubuf->desc;
546 
547 				foreign_queue = ubuf_to_queue(ubuf);
548 				foreign_gref =
549 					foreign_queue->pending_tx_info[pending_idx].req.gref;
550 				/* Just a safety measure. If this was the last
551 				 * element on the list, the for loop will
552 				 * iterate again if a local page was added to
553 				 * the end. Using head_ubuf here prevents a
554 				 * second search on the chain. Alternatively the
555 				 * original frags may have changed order, but
556 				 * that's less likely. Either way, ubuf should
557 				 * not be NULL here.
558 				 */
558 				ubuf = ubuf->ctx ?
559 					(struct ubuf_info *) ubuf->ctx :
560 					head_ubuf;
561 			} else
562 				/* This frag was a local page, added to the
563 				 * array after the skb left netback.
564 				 */
565 				ubuf = head_ubuf;
566 		}
567 		xenvif_gop_frag_copy(queue, skb, npo,
568 				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
569 				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
570 				     skb_shinfo(skb)->frags[i].page_offset,
571 				     &head,
572 				     foreign_queue,
573 				     foreign_queue ? foreign_gref : UINT_MAX);
574 	}
575 
576 	return npo->meta_prod - old_meta_prod;
577 }
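/* Rough slot accounting example (illustrative): a non-GSO skb with 1000
 * bytes of linear data plus one 2000-byte frag normally consumes a single
 * meta slot, since everything fits below MAX_BUFFER_OFFSET in one buffer.
 * A TCPv4 GSO skb additionally reserves one ring slot for the
 * XEN_NETIF_EXTRA_TYPE_GSO descriptor (the req_cons++ in
 * xenvif_gop_frag_copy()), matching the "meta slots plus GSO descriptors"
 * rule described above.
 */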
578 
579 /*
580  * This is a twin to xenvif_gop_skb.  Assume that xenvif_gop_skb was
581  * used to set up the operations on the top of
582  * netrx_pending_operations, which have since been done.  Check that
583  * they didn't give any errors and advance over them.
584  */
585 static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
586 			    struct netrx_pending_operations *npo)
587 {
588 	struct gnttab_copy     *copy_op;
589 	int status = XEN_NETIF_RSP_OKAY;
590 	int i;
591 
592 	for (i = 0; i < nr_meta_slots; i++) {
593 		copy_op = npo->copy + npo->copy_cons++;
594 		if (copy_op->status != GNTST_okay) {
595 			netdev_dbg(vif->dev,
596 				   "Bad status %d from copy to DOM%d.\n",
597 				   copy_op->status, vif->domid);
598 			status = XEN_NETIF_RSP_ERROR;
599 		}
600 	}
601 
602 	return status;
603 }
604 
605 static void xenvif_add_frag_responses(struct xenvif_queue *queue, int status,
606 				      struct xenvif_rx_meta *meta,
607 				      int nr_meta_slots)
608 {
609 	int i;
610 	unsigned long offset;
611 
612 	/* No fragments used */
613 	if (nr_meta_slots <= 1)
614 		return;
615 
616 	nr_meta_slots--;
617 
618 	for (i = 0; i < nr_meta_slots; i++) {
619 		int flags;
620 		if (i == nr_meta_slots - 1)
621 			flags = 0;
622 		else
623 			flags = XEN_NETRXF_more_data;
624 
625 		offset = 0;
626 		make_rx_response(queue, meta[i].id, status, offset,
627 				 meta[i].size, flags);
628 	}
629 }
630 
631 void xenvif_kick_thread(struct xenvif_queue *queue)
632 {
633 	wake_up(&queue->wq);
634 }
635 
636 static void xenvif_rx_action(struct xenvif_queue *queue)
637 {
638 	s8 status;
639 	u16 flags;
640 	struct xen_netif_rx_response *resp;
641 	struct sk_buff_head rxq;
642 	struct sk_buff *skb;
643 	LIST_HEAD(notify);
644 	int ret;
645 	unsigned long offset;
646 	bool need_to_notify = false;
647 
648 	struct netrx_pending_operations npo = {
649 		.copy  = queue->grant_copy_op,
650 		.meta  = queue->meta,
651 	};
652 
653 	skb_queue_head_init(&rxq);
654 
655 	while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
656 	       && (skb = xenvif_rx_dequeue(queue)) != NULL) {
657 		RING_IDX max_slots_needed;
658 		RING_IDX old_req_cons;
659 		RING_IDX ring_slots_used;
660 		int i;
661 
662 		queue->last_rx_time = jiffies;
663 
664 		/* We need a cheap worst-case estimate for the number of
665 		 * slots we'll use.
666 		 */
667 
668 		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
669 						skb_headlen(skb),
670 						PAGE_SIZE);
671 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
672 			unsigned int size;
673 			unsigned int offset;
674 
675 			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
676 			offset = skb_shinfo(skb)->frags[i].page_offset;
677 
678 			/* For a worst-case estimate we need to factor in
679 			 * the fragment page offset as this will affect the
680 			 * number of times xenvif_gop_frag_copy() will
681 			 * call start_new_rx_buffer().
682 			 */
683 			max_slots_needed += DIV_ROUND_UP(offset + size,
684 							 PAGE_SIZE);
685 		}
686 
687 		/* To avoid the estimate becoming too pessimal for some
688 		 * frontends that limit posted rx requests, cap the estimate
689 		 * at MAX_SKB_FRAGS. In this case netback will fully coalesce
690 		 * the skb into the provided slots.
691 		 */
692 		if (max_slots_needed > MAX_SKB_FRAGS) {
693 			max_slots_needed = MAX_SKB_FRAGS;
694 			XENVIF_RX_CB(skb)->full_coalesce = true;
695 		} else {
696 			XENVIF_RX_CB(skb)->full_coalesce = false;
697 		}
698 
699 		/* We may need one more slot for GSO metadata */
700 		if (skb_is_gso(skb) &&
701 		   (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
702 		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
703 			max_slots_needed++;
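		/* Worked example (illustrative, 4 KiB pages): 2000 bytes of linear
		 * data starting 100 bytes into a page needs
		 * DIV_ROUND_UP(2100, 4096) = 1 slot, a 5000-byte frag at page
		 * offset 2000 adds DIV_ROUND_UP(7000, 4096) = 2 slots, and a GSO
		 * packet reserves one more, giving max_slots_needed = 4.
		 */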
704 
705 		old_req_cons = queue->rx.req_cons;
706 		XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
707 		ring_slots_used = queue->rx.req_cons - old_req_cons;
708 
709 		BUG_ON(ring_slots_used > max_slots_needed);
710 
711 		__skb_queue_tail(&rxq, skb);
712 	}
713 
714 	BUG_ON(npo.meta_prod > ARRAY_SIZE(queue->meta));
715 
716 	if (!npo.copy_prod)
717 		goto done;
718 
719 	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
720 	gnttab_batch_copy(queue->grant_copy_op, npo.copy_prod);
721 
722 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
723 
724 		if ((1 << queue->meta[npo.meta_cons].gso_type) &
725 		    queue->vif->gso_prefix_mask) {
726 			resp = RING_GET_RESPONSE(&queue->rx,
727 						 queue->rx.rsp_prod_pvt++);
728 
729 			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
730 
731 			resp->offset = queue->meta[npo.meta_cons].gso_size;
732 			resp->id = queue->meta[npo.meta_cons].id;
733 			resp->status = XENVIF_RX_CB(skb)->meta_slots_used;
734 
735 			npo.meta_cons++;
736 			XENVIF_RX_CB(skb)->meta_slots_used--;
737 		}
738 
739 
740 		queue->stats.tx_bytes += skb->len;
741 		queue->stats.tx_packets++;
742 
743 		status = xenvif_check_gop(queue->vif,
744 					  XENVIF_RX_CB(skb)->meta_slots_used,
745 					  &npo);
746 
747 		if (XENVIF_RX_CB(skb)->meta_slots_used == 1)
748 			flags = 0;
749 		else
750 			flags = XEN_NETRXF_more_data;
751 
752 		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
753 			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
754 		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
755 			/* remote but checksummed. */
756 			flags |= XEN_NETRXF_data_validated;
757 
758 		offset = 0;
759 		resp = make_rx_response(queue, queue->meta[npo.meta_cons].id,
760 					status, offset,
761 					queue->meta[npo.meta_cons].size,
762 					flags);
763 
764 		if ((1 << queue->meta[npo.meta_cons].gso_type) &
765 		    queue->vif->gso_mask) {
766 			struct xen_netif_extra_info *gso =
767 				(struct xen_netif_extra_info *)
768 				RING_GET_RESPONSE(&queue->rx,
769 						  queue->rx.rsp_prod_pvt++);
770 
771 			resp->flags |= XEN_NETRXF_extra_info;
772 
773 			gso->u.gso.type = queue->meta[npo.meta_cons].gso_type;
774 			gso->u.gso.size = queue->meta[npo.meta_cons].gso_size;
775 			gso->u.gso.pad = 0;
776 			gso->u.gso.features = 0;
777 
778 			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
779 			gso->flags = 0;
780 		}
781 
782 		xenvif_add_frag_responses(queue, status,
783 					  queue->meta + npo.meta_cons + 1,
784 					  XENVIF_RX_CB(skb)->meta_slots_used);
785 
786 		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, ret);
787 
788 		need_to_notify |= !!ret;
789 
790 		npo.meta_cons += XENVIF_RX_CB(skb)->meta_slots_used;
791 		dev_kfree_skb(skb);
792 	}
793 
794 done:
795 	if (need_to_notify)
796 		notify_remote_via_irq(queue->rx_irq);
797 }
798 
799 void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
800 {
801 	int more_to_do;
802 
803 	RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
804 
805 	if (more_to_do)
806 		napi_schedule(&queue->napi);
807 }
808 
809 static void tx_add_credit(struct xenvif_queue *queue)
810 {
811 	unsigned long max_burst, max_credit;
812 
813 	/*
814 	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
815 	 * Otherwise the interface can seize up due to insufficient credit.
816 	 */
817 	max_burst = RING_GET_REQUEST(&queue->tx, queue->tx.req_cons)->size;
818 	max_burst = min(max_burst, 131072UL);
819 	max_burst = max(max_burst, queue->credit_bytes);
820 
821 	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
822 	max_credit = queue->remaining_credit + queue->credit_bytes;
823 	if (max_credit < queue->remaining_credit)
824 		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
825 
826 	queue->remaining_credit = min(max_credit, max_burst);
827 }
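/* Illustrative example: with credit_bytes == 10000, remaining_credit == 2000
 * and a 1500-byte request at the ring head, max_burst becomes
 * max(min(1500, 131072), 10000) == 10000, max_credit == 12000, and
 * remaining_credit is therefore topped up to min(12000, 10000) == 10000.
 */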
828 
829 static void tx_credit_callback(unsigned long data)
830 {
831 	struct xenvif_queue *queue = (struct xenvif_queue *)data;
832 	tx_add_credit(queue);
833 	xenvif_napi_schedule_or_enable_events(queue);
834 }
835 
836 static void xenvif_tx_err(struct xenvif_queue *queue,
837 			  struct xen_netif_tx_request *txp, RING_IDX end)
838 {
839 	RING_IDX cons = queue->tx.req_cons;
840 	unsigned long flags;
841 
842 	do {
843 		spin_lock_irqsave(&queue->response_lock, flags);
844 		make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR);
845 		spin_unlock_irqrestore(&queue->response_lock, flags);
846 		if (cons == end)
847 			break;
848 		txp = RING_GET_REQUEST(&queue->tx, cons++);
849 	} while (1);
850 	queue->tx.req_cons = cons;
851 }
852 
853 static void xenvif_fatal_tx_err(struct xenvif *vif)
854 {
855 	netdev_err(vif->dev, "fatal error; disabling device\n");
856 	vif->disabled = true;
857 	/* Disable the vif from queue 0's kthread */
858 	if (vif->queues)
859 		xenvif_kick_thread(&vif->queues[0]);
860 }
861 
862 static int xenvif_count_requests(struct xenvif_queue *queue,
863 				 struct xen_netif_tx_request *first,
864 				 struct xen_netif_tx_request *txp,
865 				 int work_to_do)
866 {
867 	RING_IDX cons = queue->tx.req_cons;
868 	int slots = 0;
869 	int drop_err = 0;
870 	int more_data;
871 
872 	if (!(first->flags & XEN_NETTXF_more_data))
873 		return 0;
874 
875 	do {
876 		struct xen_netif_tx_request dropped_tx = { 0 };
877 
878 		if (slots >= work_to_do) {
879 			netdev_err(queue->vif->dev,
880 				   "Asked for %d slots but exceeds this limit\n",
881 				   work_to_do);
882 			xenvif_fatal_tx_err(queue->vif);
883 			return -ENODATA;
884 		}
885 
886 		/* This guest is really using too many slots and is
887 		 * considered malicious.
888 		 */
889 		if (unlikely(slots >= fatal_skb_slots)) {
890 			netdev_err(queue->vif->dev,
891 				   "Malicious frontend using %d slots, threshold %u\n",
892 				   slots, fatal_skb_slots);
893 			xenvif_fatal_tx_err(queue->vif);
894 			return -E2BIG;
895 		}
896 
897 		/* The Xen network protocol had an implicit dependency on
898 		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
899 		 * the historical MAX_SKB_FRAGS value of 18 to honor the
900 		 * same behavior as before. Any packet using more than
901 		 * 18 slots but fewer than fatal_skb_slots slots is
902 		 * dropped.
903 		 */
904 		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
905 			if (net_ratelimit())
906 				netdev_dbg(queue->vif->dev,
907 					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
908 					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
909 			drop_err = -E2BIG;
910 		}
911 
912 		if (drop_err)
913 			txp = &dropped_tx;
914 
915 		memcpy(txp, RING_GET_REQUEST(&queue->tx, cons + slots),
916 		       sizeof(*txp));
917 
918 		/* If the guest submitted a frame >= 64 KiB then
919 		 * first->size overflowed and following slots will
920 		 * appear to be larger than the frame.
921 		 *
922 		 * This cannot be a fatal error as there are buggy
923 		 * frontends that do this.
924 		 *
925 		 * Consume all slots and drop the packet.
926 		 */
927 		if (!drop_err && txp->size > first->size) {
928 			if (net_ratelimit())
929 				netdev_dbg(queue->vif->dev,
930 					   "Invalid tx request, slot size %u > remaining size %u\n",
931 					   txp->size, first->size);
932 			drop_err = -EIO;
933 		}
934 
935 		first->size -= txp->size;
936 		slots++;
937 
938 		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
939 			netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
940 				 txp->offset, txp->size);
941 			xenvif_fatal_tx_err(queue->vif);
942 			return -EINVAL;
943 		}
944 
945 		more_data = txp->flags & XEN_NETTXF_more_data;
946 
947 		if (!drop_err)
948 			txp++;
949 
950 	} while (more_data);
951 
952 	if (drop_err) {
953 		xenvif_tx_err(queue, first, cons + slots);
954 		return drop_err;
955 	}
956 
957 	return slots;
958 }
959 
960 
961 struct xenvif_tx_cb {
962 	u16 pending_idx;
963 };
964 
965 #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
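/* The skb control buffer only records the pending ring index of the slot
 * holding the packet header; frag slots are tracked through each frag's
 * page_offset field instead (see frag_get_pending_idx() and
 * frag_set_pending_idx() above).
 */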
966 
967 static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
968 					  u16 pending_idx,
969 					  struct xen_netif_tx_request *txp,
970 					  struct gnttab_map_grant_ref *mop)
971 {
972 	queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
973 	gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
974 			  GNTMAP_host_map | GNTMAP_readonly,
975 			  txp->gref, queue->vif->domid);
976 
977 	memcpy(&queue->pending_tx_info[pending_idx].req, txp,
978 	       sizeof(*txp));
979 }
980 
981 static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
982 {
983 	struct sk_buff *skb =
984 		alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
985 			  GFP_ATOMIC | __GFP_NOWARN);
986 	if (unlikely(skb == NULL))
987 		return NULL;
988 
989 	/* Packets passed to netif_rx() must have some headroom. */
990 	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
991 
992 	/* Initialize it here to avoid later surprises */
993 	skb_shinfo(skb)->destructor_arg = NULL;
994 
995 	return skb;
996 }
997 
998 static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
999 							struct sk_buff *skb,
1000 							struct xen_netif_tx_request *txp,
1001 							struct gnttab_map_grant_ref *gop)
1002 {
1003 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1004 	skb_frag_t *frags = shinfo->frags;
1005 	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1006 	int start;
1007 	pending_ring_idx_t index;
1008 	unsigned int nr_slots, frag_overflow = 0;
1009 
1010 	/* At this point shinfo->nr_frags is in fact the number of
1011 	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
1012 	 */
1013 	if (shinfo->nr_frags > MAX_SKB_FRAGS) {
1014 		frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
1015 		BUG_ON(frag_overflow > MAX_SKB_FRAGS);
1016 		shinfo->nr_frags = MAX_SKB_FRAGS;
1017 	}
1018 	nr_slots = shinfo->nr_frags;
1019 
1020 	/* Skip first skb fragment if it is on same page as header fragment. */
1021 	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
1022 
1023 	for (shinfo->nr_frags = start; shinfo->nr_frags < nr_slots;
1024 	     shinfo->nr_frags++, txp++, gop++) {
1025 		index = pending_index(queue->pending_cons++);
1026 		pending_idx = queue->pending_ring[index];
1027 		xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
1028 		frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
1029 	}
1030 
1031 	if (frag_overflow) {
1032 		struct sk_buff *nskb = xenvif_alloc_skb(0);
1033 		if (unlikely(nskb == NULL)) {
1034 			if (net_ratelimit())
1035 				netdev_err(queue->vif->dev,
1036 					   "Can't allocate the frag_list skb.\n");
1037 			return NULL;
1038 		}
1039 
1040 		shinfo = skb_shinfo(nskb);
1041 		frags = shinfo->frags;
1042 
1043 		for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow;
1044 		     shinfo->nr_frags++, txp++, gop++) {
1045 			index = pending_index(queue->pending_cons++);
1046 			pending_idx = queue->pending_ring[index];
1047 			xenvif_tx_create_map_op(queue, pending_idx, txp, gop);
1048 			frag_set_pending_idx(&frags[shinfo->nr_frags],
1049 					     pending_idx);
1050 		}
1051 
1052 		skb_shinfo(skb)->frag_list = nskb;
1053 	}
1054 
1055 	return gop;
1056 }
1057 
1058 static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
1059 					   u16 pending_idx,
1060 					   grant_handle_t handle)
1061 {
1062 	if (unlikely(queue->grant_tx_handle[pending_idx] !=
1063 		     NETBACK_INVALID_HANDLE)) {
1064 		netdev_err(queue->vif->dev,
1065 			   "Trying to overwrite active handle! pending_idx: %x\n",
1066 			   pending_idx);
1067 		BUG();
1068 	}
1069 	queue->grant_tx_handle[pending_idx] = handle;
1070 }
1071 
1072 static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
1073 					     u16 pending_idx)
1074 {
1075 	if (unlikely(queue->grant_tx_handle[pending_idx] ==
1076 		     NETBACK_INVALID_HANDLE)) {
1077 		netdev_err(queue->vif->dev,
1078 			   "Trying to unmap invalid handle! pending_idx: %x\n",
1079 			   pending_idx);
1080 		BUG();
1081 	}
1082 	queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
1083 }
1084 
1085 static int xenvif_tx_check_gop(struct xenvif_queue *queue,
1086 			       struct sk_buff *skb,
1087 			       struct gnttab_map_grant_ref **gopp_map,
1088 			       struct gnttab_copy **gopp_copy)
1089 {
1090 	struct gnttab_map_grant_ref *gop_map = *gopp_map;
1091 	u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1092 	/* This always points to the shinfo of the skb being checked, which
1093 	 * could be either the first or the one on the frag_list
1094 	 */
1095 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1096 	/* If this is non-NULL, we are currently checking the frag_list skb, and
1097 	 * this points to the shinfo of the first one
1098 	 */
1099 	struct skb_shared_info *first_shinfo = NULL;
1100 	int nr_frags = shinfo->nr_frags;
1101 	const bool sharedslot = nr_frags &&
1102 				frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
1103 	int i, err;
1104 
1105 	/* Check status of header. */
1106 	err = (*gopp_copy)->status;
1107 	if (unlikely(err)) {
1108 		if (net_ratelimit())
1109 			netdev_dbg(queue->vif->dev,
1110 				   "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
1111 				   (*gopp_copy)->status,
1112 				   pending_idx,
1113 				   (*gopp_copy)->source.u.ref);
1114 		/* The first frag might still have this slot mapped */
1115 		if (!sharedslot)
1116 			xenvif_idx_release(queue, pending_idx,
1117 					   XEN_NETIF_RSP_ERROR);
1118 	}
1119 	(*gopp_copy)++;
1120 
1121 check_frags:
1122 	for (i = 0; i < nr_frags; i++, gop_map++) {
1123 		int j, newerr;
1124 
1125 		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
1126 
1127 		/* Check error status: if okay then remember grant handle. */
1128 		newerr = gop_map->status;
1129 
1130 		if (likely(!newerr)) {
1131 			xenvif_grant_handle_set(queue,
1132 						pending_idx,
1133 						gop_map->handle);
1134 			/* Had a previous error? Invalidate this fragment. */
1135 			if (unlikely(err)) {
1136 				xenvif_idx_unmap(queue, pending_idx);
1137 				/* If the mapping of the first frag was OK, but
1138 				 * the header's copy failed, and they are
1139 				 * sharing a slot, send an error
1140 				 */
1141 				if (i == 0 && sharedslot)
1142 					xenvif_idx_release(queue, pending_idx,
1143 							   XEN_NETIF_RSP_ERROR);
1144 				else
1145 					xenvif_idx_release(queue, pending_idx,
1146 							   XEN_NETIF_RSP_OKAY);
1147 			}
1148 			continue;
1149 		}
1150 
1151 		/* Error on this fragment: respond to client with an error. */
1152 		if (net_ratelimit())
1153 			netdev_dbg(queue->vif->dev,
1154 				   "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
1155 				   i,
1156 				   gop_map->status,
1157 				   pending_idx,
1158 				   gop_map->ref);
1159 
1160 		xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
1161 
1162 		/* Not the first error? Preceding frags already invalidated. */
1163 		if (err)
1164 			continue;
1165 
1166 		/* First error: if the header hasn't shared a slot with the
1167 		 * first frag, release it as well.
1168 		 */
1169 		if (!sharedslot)
1170 			xenvif_idx_release(queue,
1171 					   XENVIF_TX_CB(skb)->pending_idx,
1172 					   XEN_NETIF_RSP_OKAY);
1173 
1174 		/* Invalidate preceding fragments of this skb. */
1175 		for (j = 0; j < i; j++) {
1176 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
1177 			xenvif_idx_unmap(queue, pending_idx);
1178 			xenvif_idx_release(queue, pending_idx,
1179 					   XEN_NETIF_RSP_OKAY);
1180 		}
1181 
1182 		/* And if we found the error while checking the frag_list, unmap
1183 		 * the first skb's frags
1184 		 */
1185 		if (first_shinfo) {
1186 			for (j = 0; j < first_shinfo->nr_frags; j++) {
1187 				pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
1188 				xenvif_idx_unmap(queue, pending_idx);
1189 				xenvif_idx_release(queue, pending_idx,
1190 						   XEN_NETIF_RSP_OKAY);
1191 			}
1192 		}
1193 
1194 		/* Remember the error: invalidate all subsequent fragments. */
1195 		err = newerr;
1196 	}
1197 
1198 	if (skb_has_frag_list(skb) && !first_shinfo) {
1199 		first_shinfo = skb_shinfo(skb);
1200 		shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
1201 		nr_frags = shinfo->nr_frags;
1202 
1203 		goto check_frags;
1204 	}
1205 
1206 	*gopp_map = gop_map;
1207 	return err;
1208 }
1209 
1210 static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
1211 {
1212 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1213 	int nr_frags = shinfo->nr_frags;
1214 	int i;
1215 	u16 prev_pending_idx = INVALID_PENDING_IDX;
1216 
1217 	for (i = 0; i < nr_frags; i++) {
1218 		skb_frag_t *frag = shinfo->frags + i;
1219 		struct xen_netif_tx_request *txp;
1220 		struct page *page;
1221 		u16 pending_idx;
1222 
1223 		pending_idx = frag_get_pending_idx(frag);
1224 
1225 		/* If this is not the first frag, chain it to the previous */
1226 		if (prev_pending_idx == INVALID_PENDING_IDX)
1227 			skb_shinfo(skb)->destructor_arg =
1228 				&callback_param(queue, pending_idx);
1229 		else
1230 			callback_param(queue, prev_pending_idx).ctx =
1231 				&callback_param(queue, pending_idx);
1232 
1233 		callback_param(queue, pending_idx).ctx = NULL;
1234 		prev_pending_idx = pending_idx;
1235 
1236 		txp = &queue->pending_tx_info[pending_idx].req;
1237 		page = virt_to_page(idx_to_kaddr(queue, pending_idx));
1238 		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
1239 		skb->len += txp->size;
1240 		skb->data_len += txp->size;
1241 		skb->truesize += txp->size;
1242 
1243 		/* Take an extra reference to offset network stack's put_page */
1244 		get_page(queue->mmap_pages[pending_idx]);
1245 	}
1246 	/* FIXME: __skb_fill_page_desc set this to true because page->pfmemalloc
1247 	 * overlaps with "index", and "mapping" is not set. I think mapping
1248 	 * should be set. If delivered to local stack, it would drop this
1249 	 * skb in sk_filter unless the socket has the right to use it.
1250 	 */
1251 	skb->pfmemalloc	= false;
1252 }
1253 
1254 static int xenvif_get_extras(struct xenvif_queue *queue,
1255 				struct xen_netif_extra_info *extras,
1256 				int work_to_do)
1257 {
1258 	struct xen_netif_extra_info extra;
1259 	RING_IDX cons = queue->tx.req_cons;
1260 
1261 	do {
1262 		if (unlikely(work_to_do-- <= 0)) {
1263 			netdev_err(queue->vif->dev, "Missing extra info\n");
1264 			xenvif_fatal_tx_err(queue->vif);
1265 			return -EBADR;
1266 		}
1267 
1268 		memcpy(&extra, RING_GET_REQUEST(&queue->tx, cons),
1269 		       sizeof(extra));
1270 		if (unlikely(!extra.type ||
1271 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1272 			queue->tx.req_cons = ++cons;
1273 			netdev_err(queue->vif->dev,
1274 				   "Invalid extra type: %d\n", extra.type);
1275 			xenvif_fatal_tx_err(queue->vif);
1276 			return -EINVAL;
1277 		}
1278 
1279 		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1280 		queue->tx.req_cons = ++cons;
1281 	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1282 
1283 	return work_to_do;
1284 }
1285 
1286 static int xenvif_set_skb_gso(struct xenvif *vif,
1287 			      struct sk_buff *skb,
1288 			      struct xen_netif_extra_info *gso)
1289 {
1290 	if (!gso->u.gso.size) {
1291 		netdev_err(vif->dev, "GSO size must not be zero.\n");
1292 		xenvif_fatal_tx_err(vif);
1293 		return -EINVAL;
1294 	}
1295 
1296 	switch (gso->u.gso.type) {
1297 	case XEN_NETIF_GSO_TYPE_TCPV4:
1298 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1299 		break;
1300 	case XEN_NETIF_GSO_TYPE_TCPV6:
1301 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1302 		break;
1303 	default:
1304 		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1305 		xenvif_fatal_tx_err(vif);
1306 		return -EINVAL;
1307 	}
1308 
1309 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
1310 	/* gso_segs will be calculated later */
1311 
1312 	return 0;
1313 }
1314 
1315 static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
1316 {
1317 	bool recalculate_partial_csum = false;
1318 
1319 	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1320 	 * peers can fail to set NETRXF_csum_blank when sending a GSO
1321 	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1322 	 * recalculate the partial checksum.
1323 	 */
1324 	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1325 		queue->stats.rx_gso_checksum_fixup++;
1326 		skb->ip_summed = CHECKSUM_PARTIAL;
1327 		recalculate_partial_csum = true;
1328 	}
1329 
1330 	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1331 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1332 		return 0;
1333 
1334 	return skb_checksum_setup(skb, recalculate_partial_csum);
1335 }
1336 
1337 static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
1338 {
1339 	u64 now = get_jiffies_64();
1340 	u64 next_credit = queue->credit_window_start +
1341 		msecs_to_jiffies(queue->credit_usec / 1000);
1342 
1343 	/* Timer could already be pending in rare cases. */
1344 	if (timer_pending(&queue->credit_timeout))
1345 		return true;
1346 
1347 	/* Passed the point where we can replenish credit? */
1348 	if (time_after_eq64(now, next_credit)) {
1349 		queue->credit_window_start = now;
1350 		tx_add_credit(queue);
1351 	}
1352 
1353 	/* Still too big to send right now? Set a callback. */
1354 	if (size > queue->remaining_credit) {
1355 		queue->credit_timeout.data     =
1356 			(unsigned long)queue;
1357 		queue->credit_timeout.function =
1358 			tx_credit_callback;
1359 		mod_timer(&queue->credit_timeout,
1360 			  next_credit);
1361 		queue->credit_window_start = next_credit;
1362 
1363 		return true;
1364 	}
1365 
1366 	return false;
1367 }
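/* Illustrative example: with credit_usec == 1000000 (a one second replenish
 * window) and remaining_credit == 2000, a 9000-byte request arriving
 * mid-window cannot be sent: credit is not replenished yet, so the
 * credit_timeout timer is armed for next_credit and transmission resumes
 * once tx_credit_callback() tops the credit back up.
 */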
1368 
1369 static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1370 				     int budget,
1371 				     unsigned *copy_ops,
1372 				     unsigned *map_ops)
1373 {
1374 	struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop;
1375 	struct sk_buff *skb;
1376 	int ret;
1377 
1378 	while (skb_queue_len(&queue->tx_queue) < budget) {
1379 		struct xen_netif_tx_request txreq;
1380 		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
1381 		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1382 		u16 pending_idx;
1383 		RING_IDX idx;
1384 		int work_to_do;
1385 		unsigned int data_len;
1386 		pending_ring_idx_t index;
1387 
1388 		if (queue->tx.sring->req_prod - queue->tx.req_cons >
1389 		    XEN_NETIF_TX_RING_SIZE) {
1390 			netdev_err(queue->vif->dev,
1391 				   "Impossible number of requests. "
1392 				   "req_prod %d, req_cons %d, size %ld\n",
1393 				   queue->tx.sring->req_prod, queue->tx.req_cons,
1394 				   XEN_NETIF_TX_RING_SIZE);
1395 			xenvif_fatal_tx_err(queue->vif);
1396 			break;
1397 		}
1398 
1399 		work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
1400 		if (!work_to_do)
1401 			break;
1402 
1403 		idx = queue->tx.req_cons;
1404 		rmb(); /* Ensure that we see the request before we copy it. */
1405 		memcpy(&txreq, RING_GET_REQUEST(&queue->tx, idx), sizeof(txreq));
1406 
1407 		/* Credit-based scheduling. */
1408 		if (txreq.size > queue->remaining_credit &&
1409 		    tx_credit_exceeded(queue, txreq.size))
1410 			break;
1411 
1412 		queue->remaining_credit -= txreq.size;
1413 
1414 		work_to_do--;
1415 		queue->tx.req_cons = ++idx;
1416 
1417 		memset(extras, 0, sizeof(extras));
1418 		if (txreq.flags & XEN_NETTXF_extra_info) {
1419 			work_to_do = xenvif_get_extras(queue, extras,
1420 						       work_to_do);
1421 			idx = queue->tx.req_cons;
1422 			if (unlikely(work_to_do < 0))
1423 				break;
1424 		}
1425 
1426 		ret = xenvif_count_requests(queue, &txreq, txfrags, work_to_do);
1427 		if (unlikely(ret < 0))
1428 			break;
1429 
1430 		idx += ret;
1431 
1432 		if (unlikely(txreq.size < ETH_HLEN)) {
1433 			netdev_dbg(queue->vif->dev,
1434 				   "Bad packet size: %d\n", txreq.size);
1435 			xenvif_tx_err(queue, &txreq, idx);
1436 			break;
1437 		}
1438 
1439 		/* The payload must not cross a page boundary, as it cannot be fragmented. */
1440 		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
1441 			netdev_err(queue->vif->dev,
1442 				   "txreq.offset: %x, size: %u, end: %lu\n",
1443 				   txreq.offset, txreq.size,
1444 				   (txreq.offset&~PAGE_MASK) + txreq.size);
1445 			xenvif_fatal_tx_err(queue->vif);
1446 			break;
1447 		}
1448 
1449 		index = pending_index(queue->pending_cons);
1450 		pending_idx = queue->pending_ring[index];
1451 
1452 		data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN &&
1453 			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
1454 			XEN_NETBACK_TX_COPY_LEN : txreq.size;
1455 
1456 		skb = xenvif_alloc_skb(data_len);
1457 		if (unlikely(skb == NULL)) {
1458 			netdev_dbg(queue->vif->dev,
1459 				   "Can't allocate a skb in start_xmit.\n");
1460 			xenvif_tx_err(queue, &txreq, idx);
1461 			break;
1462 		}
1463 
1464 		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1465 			struct xen_netif_extra_info *gso;
1466 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1467 
1468 			if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1469 				/* Failure in xenvif_set_skb_gso is fatal. */
1470 				kfree_skb(skb);
1471 				break;
1472 			}
1473 		}
1474 
1475 		XENVIF_TX_CB(skb)->pending_idx = pending_idx;
1476 
1477 		__skb_put(skb, data_len);
1478 		queue->tx_copy_ops[*copy_ops].source.u.ref = txreq.gref;
1479 		queue->tx_copy_ops[*copy_ops].source.domid = queue->vif->domid;
1480 		queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
1481 
1482 		queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
1483 			virt_to_mfn(skb->data);
1484 		queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
1485 		queue->tx_copy_ops[*copy_ops].dest.offset =
1486 			offset_in_page(skb->data);
1487 
1488 		queue->tx_copy_ops[*copy_ops].len = data_len;
1489 		queue->tx_copy_ops[*copy_ops].flags = GNTCOPY_source_gref;
1490 
1491 		(*copy_ops)++;
1492 
1493 		skb_shinfo(skb)->nr_frags = ret;
1494 		if (data_len < txreq.size) {
1495 			skb_shinfo(skb)->nr_frags++;
1496 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1497 					     pending_idx);
1498 			xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
1499 			gop++;
1500 		} else {
1501 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1502 					     INVALID_PENDING_IDX);
1503 			memcpy(&queue->pending_tx_info[pending_idx].req, &txreq,
1504 			       sizeof(txreq));
1505 		}
1506 
1507 		queue->pending_cons++;
1508 
1509 		request_gop = xenvif_get_requests(queue, skb, txfrags, gop);
1510 		if (request_gop == NULL) {
1511 			kfree_skb(skb);
1512 			xenvif_tx_err(queue, &txreq, idx);
1513 			break;
1514 		}
1515 		gop = request_gop;
1516 
1517 		__skb_queue_tail(&queue->tx_queue, skb);
1518 
1519 		queue->tx.req_cons = idx;
1520 
1521 		if (((gop-queue->tx_map_ops) >= ARRAY_SIZE(queue->tx_map_ops)) ||
1522 		    (*copy_ops >= ARRAY_SIZE(queue->tx_copy_ops)))
1523 			break;
1524 	}
1525 
1526 	(*map_ops) = gop - queue->tx_map_ops;
1527 	return;
1528 }
1529 
1530 /* Consolidate skb with a frag_list into a brand new one with local pages on
1531  * frags. Returns 0 or -ENOMEM if can't allocate new pages.
1532  */
1533 static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1534 {
1535 	unsigned int offset = skb_headlen(skb);
1536 	skb_frag_t frags[MAX_SKB_FRAGS];
1537 	int i;
1538 	struct ubuf_info *uarg;
1539 	struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1540 
1541 	queue->stats.tx_zerocopy_sent += 2;
1542 	queue->stats.tx_frag_overflow++;
1543 
1544 	xenvif_fill_frags(queue, nskb);
1545 	/* Subtract frags size, we will correct it later */
1546 	skb->truesize -= skb->data_len;
1547 	skb->len += nskb->len;
1548 	skb->data_len += nskb->len;
1549 
1550 	/* create a brand new frags array and coalesce there */
1551 	for (i = 0; offset < skb->len; i++) {
1552 		struct page *page;
1553 		unsigned int len;
1554 
1555 		BUG_ON(i >= MAX_SKB_FRAGS);
1556 		page = alloc_page(GFP_ATOMIC);
1557 		if (!page) {
1558 			int j;
1559 			skb->truesize += skb->data_len;
1560 			for (j = 0; j < i; j++)
1561 				put_page(frags[j].page.p);
1562 			return -ENOMEM;
1563 		}
1564 
1565 		if (offset + PAGE_SIZE < skb->len)
1566 			len = PAGE_SIZE;
1567 		else
1568 			len = skb->len - offset;
1569 		if (skb_copy_bits(skb, offset, page_address(page), len))
1570 			BUG();
1571 
1572 		offset += len;
1573 		frags[i].page.p = page;
1574 		frags[i].page_offset = 0;
1575 		skb_frag_size_set(&frags[i], len);
1576 	}
1577 	/* swap out with old one */
1578 	memcpy(skb_shinfo(skb)->frags,
1579 	       frags,
1580 	       i * sizeof(skb_frag_t));
1581 	skb_shinfo(skb)->nr_frags = i;
1582 	skb->truesize += i * PAGE_SIZE;
1583 
1584 	/* remove traces of mapped pages and frag_list */
1585 	skb_frag_list_init(skb);
1586 	uarg = skb_shinfo(skb)->destructor_arg;
1587 	/* increase inflight counter to offset decrement in callback */
1588 	atomic_inc(&queue->inflight_packets);
1589 	uarg->callback(uarg, true);
1590 	skb_shinfo(skb)->destructor_arg = NULL;
1591 
1592 	xenvif_skb_zerocopy_prepare(queue, nskb);
1593 	kfree_skb(nskb);
1594 
1595 	return 0;
1596 }
1597 
1598 static int xenvif_tx_submit(struct xenvif_queue *queue)
1599 {
1600 	struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1601 	struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1602 	struct sk_buff *skb;
1603 	int work_done = 0;
1604 
1605 	while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1606 		struct xen_netif_tx_request *txp;
1607 		u16 pending_idx;
1608 		unsigned data_len;
1609 
1610 		pending_idx = XENVIF_TX_CB(skb)->pending_idx;
1611 		txp = &queue->pending_tx_info[pending_idx].req;
1612 
1613 		/* Check the remap error code. */
1614 		if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1615 			/* If there was an error, xenvif_tx_check_gop is
1616 			 * expected to release all the frags which were mapped,
1617 			 * so kfree_skb shouldn't do it again
1618 			 */
1619 			skb_shinfo(skb)->nr_frags = 0;
1620 			if (skb_has_frag_list(skb)) {
1621 				struct sk_buff *nskb =
1622 						skb_shinfo(skb)->frag_list;
1623 				skb_shinfo(nskb)->nr_frags = 0;
1624 			}
1625 			kfree_skb(skb);
1626 			continue;
1627 		}
1628 
1629 		data_len = skb->len;
1630 		callback_param(queue, pending_idx).ctx = NULL;
1631 		if (data_len < txp->size) {
1632 			/* Append the packet payload as a fragment. */
1633 			txp->offset += data_len;
1634 			txp->size -= data_len;
1635 		} else {
1636 			/* Schedule a response immediately. */
1637 			xenvif_idx_release(queue, pending_idx,
1638 					   XEN_NETIF_RSP_OKAY);
1639 		}
1640 
1641 		if (txp->flags & XEN_NETTXF_csum_blank)
1642 			skb->ip_summed = CHECKSUM_PARTIAL;
1643 		else if (txp->flags & XEN_NETTXF_data_validated)
1644 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1645 
1646 		xenvif_fill_frags(queue, skb);
1647 
1648 		if (unlikely(skb_has_frag_list(skb))) {
1649 			if (xenvif_handle_frag_list(queue, skb)) {
1650 				if (net_ratelimit())
1651 					netdev_err(queue->vif->dev,
1652 						   "Not enough memory to consolidate frag_list!\n");
1653 				xenvif_skb_zerocopy_prepare(queue, skb);
1654 				kfree_skb(skb);
1655 				continue;
1656 			}
1657 		}
1658 
1659 		skb->dev      = queue->vif->dev;
1660 		skb->protocol = eth_type_trans(skb, skb->dev);
1661 		skb_reset_network_header(skb);
1662 
1663 		if (checksum_setup(queue, skb)) {
1664 			netdev_dbg(queue->vif->dev,
1665 				   "Can't setup checksum in net_tx_action\n");
1666 			/* We have to set this flag to trigger the callback */
1667 			if (skb_shinfo(skb)->destructor_arg)
1668 				xenvif_skb_zerocopy_prepare(queue, skb);
1669 			kfree_skb(skb);
1670 			continue;
1671 		}
1672 
1673 		skb_probe_transport_header(skb, 0);
1674 
1675 		/* If the packet is GSO then we will have just set up the
1676 		 * transport header offset in checksum_setup so it's now
1677 		 * straightforward to calculate gso_segs.
1678 		 */
1679 		if (skb_is_gso(skb)) {
1680 			int mss = skb_shinfo(skb)->gso_size;
1681 			int hdrlen = skb_transport_header(skb) -
1682 				skb_mac_header(skb) +
1683 				tcp_hdrlen(skb);
1684 
1685 			skb_shinfo(skb)->gso_segs =
1686 				DIV_ROUND_UP(skb->len - hdrlen, mss);
1687 		}
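		/* Illustrative example: a TCPv4 GSO skb of 4410 bytes with 66 bytes
		 * of MAC + IP + TCP headers and a 1448-byte MSS gives
		 * gso_segs = DIV_ROUND_UP(4344, 1448) = 3.
		 */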
1688 
1689 		queue->stats.rx_bytes += skb->len;
1690 		queue->stats.rx_packets++;
1691 
1692 		work_done++;
1693 
1694 		/* Set this flag right before netif_receive_skb, otherwise
1695 		 * someone might think this packet already left netback, and
1696 		 * do a skb_copy_ubufs while we are still in control of the
1697 		 * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
1698 		 */
1699 		if (skb_shinfo(skb)->destructor_arg) {
1700 			xenvif_skb_zerocopy_prepare(queue, skb);
1701 			queue->stats.tx_zerocopy_sent++;
1702 		}
1703 
1704 		netif_receive_skb(skb);
1705 	}
1706 
1707 	return work_done;
1708 }
1709 
1710 void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1711 {
1712 	unsigned long flags;
1713 	pending_ring_idx_t index;
1714 	struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1715 
1716 	/* This is the only place where we grab this lock, to protect callbacks
1717 	 * from each other.
1718 	 */
1719 	spin_lock_irqsave(&queue->callback_lock, flags);
1720 	do {
1721 		u16 pending_idx = ubuf->desc;
1722 		ubuf = (struct ubuf_info *) ubuf->ctx;
1723 		BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1724 			MAX_PENDING_REQS);
1725 		index = pending_index(queue->dealloc_prod);
1726 		queue->dealloc_ring[index] = pending_idx;
1727 		/* Sync with xenvif_tx_dealloc_action:
1728 		 * insert idx then incr producer.
1729 		 */
1730 		smp_wmb();
1731 		queue->dealloc_prod++;
1732 	} while (ubuf);
1733 	wake_up(&queue->dealloc_wq);
1734 	spin_unlock_irqrestore(&queue->callback_lock, flags);
1735 
1736 	if (likely(zerocopy_success))
1737 		queue->stats.tx_zerocopy_success++;
1738 	else
1739 		queue->stats.tx_zerocopy_fail++;
1740 	xenvif_skb_zerocopy_complete(queue);
1741 }
1742 
1743 static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1744 {
1745 	struct gnttab_unmap_grant_ref *gop;
1746 	pending_ring_idx_t dc, dp;
1747 	u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1748 	unsigned int i = 0;
1749 
1750 	dc = queue->dealloc_cons;
1751 	gop = queue->tx_unmap_ops;
1752 
1753 	/* Free up any grants we have finished using */
1754 	do {
1755 		dp = queue->dealloc_prod;
1756 
1757 		/* Ensure we see all indices enqueued by every
1758 		 * xenvif_zerocopy_callback() instance.
1759 		 */
1760 		smp_rmb();
1761 
1762 		while (dc != dp) {
1763 			BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1764 			pending_idx =
1765 				queue->dealloc_ring[pending_index(dc++)];
1766 
1767 			pending_idx_release[gop-queue->tx_unmap_ops] =
1768 				pending_idx;
1769 			queue->pages_to_unmap[gop-queue->tx_unmap_ops] =
1770 				queue->mmap_pages[pending_idx];
1771 			gnttab_set_unmap_op(gop,
1772 					    idx_to_kaddr(queue, pending_idx),
1773 					    GNTMAP_host_map,
1774 					    queue->grant_tx_handle[pending_idx]);
1775 			xenvif_grant_handle_reset(queue, pending_idx);
1776 			++gop;
1777 		}
1778 
1779 	} while (dp != queue->dealloc_prod);
1780 
1781 	queue->dealloc_cons = dc;
1782 
1783 	if (gop - queue->tx_unmap_ops > 0) {
1784 		int ret;
1785 		ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1786 					NULL,
1787 					queue->pages_to_unmap,
1788 					gop - queue->tx_unmap_ops);
1789 		if (ret) {
1790 			netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tx ret %d\n",
1791 				   gop - queue->tx_unmap_ops, ret);
1792 			for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1793 				if (gop[i].status != GNTST_okay)
1794 					netdev_err(queue->vif->dev,
1795 						   " host_addr: %llx handle: %x status: %d\n",
1796 						   gop[i].host_addr,
1797 						   gop[i].handle,
1798 						   gop[i].status);
1799 			}
1800 			BUG();
1801 		}
1802 	}
1803 
1804 	for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1805 		xenvif_idx_release(queue, pending_idx_release[i],
1806 				   XEN_NETIF_RSP_OKAY);
1807 }
1808 
1809 
1810 /* Called after netfront has transmitted */
1811 int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1812 {
1813 	unsigned nr_mops, nr_cops = 0;
1814 	int work_done, ret;
1815 
1816 	if (unlikely(!tx_work_todo(queue)))
1817 		return 0;
1818 
1819 	xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1820 
1821 	if (nr_cops == 0)
1822 		return 0;
1823 
1824 	gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
1825 	if (nr_mops != 0) {
1826 		ret = gnttab_map_refs(queue->tx_map_ops,
1827 				      NULL,
1828 				      queue->pages_to_map,
1829 				      nr_mops);
1830 		BUG_ON(ret);
1831 	}
1832 
1833 	work_done = xenvif_tx_submit(queue);
1834 
1835 	return work_done;
1836 }
1837 
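/* Return a tx response for @pending_idx to the frontend and put the slot
 * back on the pending ring so it can be reused.
 */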
1838 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1839 			       u8 status)
1840 {
1841 	struct pending_tx_info *pending_tx_info;
1842 	pending_ring_idx_t index;
1843 	unsigned long flags;
1844 
1845 	pending_tx_info = &queue->pending_tx_info[pending_idx];
1846 	spin_lock_irqsave(&queue->response_lock, flags);
1847 	make_tx_response(queue, &pending_tx_info->req, status);
1848 	index = pending_index(queue->pending_prod);
1849 	queue->pending_ring[index] = pending_idx;
1850 	/* TX shouldn't use the index before we give it back here */
1851 	mb();
1852 	queue->pending_prod++;
1853 	spin_unlock_irqrestore(&queue->response_lock, flags);
1854 }
1855 
1856 
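/* Write a response for @txp into the shared tx ring, consuming an extra
 * slot if the request carried extra info, and notify the frontend via the
 * tx event channel if it is waiting.
 */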
1857 static void make_tx_response(struct xenvif_queue *queue,
1858 			     struct xen_netif_tx_request *txp,
1859 			     s8       st)
1860 {
1861 	RING_IDX i = queue->tx.rsp_prod_pvt;
1862 	struct xen_netif_tx_response *resp;
1863 	int notify;
1864 
1865 	resp = RING_GET_RESPONSE(&queue->tx, i);
1866 	resp->id     = txp->id;
1867 	resp->status = st;
1868 
1869 	if (txp->flags & XEN_NETTXF_extra_info)
1870 		RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1871 
1872 	queue->tx.rsp_prod_pvt = ++i;
1873 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1874 	if (notify)
1875 		notify_remote_via_irq(queue->tx_irq);
1876 }
1877 
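/* Fill in the next response slot of the shared rx ring.  A negative status
 * overrides the size in the status field; pushing the responses and
 * notifying the frontend are left to the caller.
 */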
1878 static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
1879 					     u16      id,
1880 					     s8       st,
1881 					     u16      offset,
1882 					     u16      size,
1883 					     u16      flags)
1884 {
1885 	RING_IDX i = queue->rx.rsp_prod_pvt;
1886 	struct xen_netif_rx_response *resp;
1887 
1888 	resp = RING_GET_RESPONSE(&queue->rx, i);
1889 	resp->offset     = offset;
1890 	resp->flags      = flags;
1891 	resp->id         = id;
1892 	resp->status     = (s16)size;
1893 	if (st < 0)
1894 		resp->status = (s16)st;
1895 
1896 	queue->rx.rsp_prod_pvt = ++i;
1897 
1898 	return resp;
1899 }
1900 
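/* Synchronously unmap the grant backing a single pending tx slot and reset
 * its stored handle; any failure here is fatal (BUG).
 */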
1901 void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1902 {
1903 	int ret;
1904 	struct gnttab_unmap_grant_ref tx_unmap_op;
1905 
1906 	gnttab_set_unmap_op(&tx_unmap_op,
1907 			    idx_to_kaddr(queue, pending_idx),
1908 			    GNTMAP_host_map,
1909 			    queue->grant_tx_handle[pending_idx]);
1910 	xenvif_grant_handle_reset(queue, pending_idx);
1911 
1912 	ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1913 				&queue->mmap_pages[pending_idx], 1);
1914 	if (ret) {
1915 		netdev_err(queue->vif->dev,
1916 			   "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: %x status: %d\n",
1917 			   ret,
1918 			   pending_idx,
1919 			   tx_unmap_op.host_addr,
1920 			   tx_unmap_op.handle,
1921 			   tx_unmap_op.status);
1922 		BUG();
1923 	}
1924 }
1925 
1926 static inline int tx_work_todo(struct xenvif_queue *queue)
1927 {
1928 	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1929 		return 1;
1930 
1931 	return 0;
1932 }
1933 
1934 static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1935 {
1936 	return queue->dealloc_cons != queue->dealloc_prod;
1937 }
1938 
1939 void xenvif_unmap_frontend_rings(struct xenvif_queue *queue)
1940 {
1941 	if (queue->tx.sring)
1942 		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1943 					queue->tx.sring);
1944 	if (queue->rx.sring)
1945 		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1946 					queue->rx.sring);
1947 }
1948 
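/* Map the tx and rx shared rings granted by the frontend into the backend's
 * address space and initialise the back ring structures.  On failure, any
 * ring that was already mapped is torn down again.
 */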
1949 int xenvif_map_frontend_rings(struct xenvif_queue *queue,
1950 			      grant_ref_t tx_ring_ref,
1951 			      grant_ref_t rx_ring_ref)
1952 {
1953 	void *addr;
1954 	struct xen_netif_tx_sring *txs;
1955 	struct xen_netif_rx_sring *rxs;
1956 
1957 	int err = -ENOMEM;
1958 
1959 	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1960 				     tx_ring_ref, &addr);
1961 	if (err)
1962 		goto err;
1963 
1964 	txs = (struct xen_netif_tx_sring *)addr;
1965 	BACK_RING_INIT(&queue->tx, txs, PAGE_SIZE);
1966 
1967 	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1968 				     rx_ring_ref, &addr);
1969 	if (err)
1970 		goto err;
1971 
1972 	rxs = (struct xen_netif_rx_sring *)addr;
1973 	BACK_RING_INIT(&queue->rx, rxs, PAGE_SIZE);
1974 
1975 	return 0;
1976 
1977 err:
1978 	xenvif_unmap_frontend_rings(queue);
1979 	return err;
1980 }
1981 
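/* Per-queue stall accounting: the carrier is dropped when the first queue
 * stalls and re-enabled only once every stalled queue has become ready
 * again.
 */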
1982 static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
1983 {
1984 	struct xenvif *vif = queue->vif;
1985 
1986 	queue->stalled = true;
1987 
1988 	/* At least one queue has stalled? Disable the carrier. */
1989 	spin_lock(&vif->lock);
1990 	if (vif->stalled_queues++ == 0) {
1991 		netdev_info(vif->dev, "Guest Rx stalled");
1992 		netif_carrier_off(vif->dev);
1993 	}
1994 	spin_unlock(&vif->lock);
1995 }
1996 
1997 static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
1998 {
1999 	struct xenvif *vif = queue->vif;
2000 
2001 	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
2002 	queue->stalled = false;
2003 
2004 	/* All queues are ready? Enable the carrier. */
2005 	spin_lock(&vif->lock);
2006 	if (--vif->stalled_queues == 0) {
2007 		netdev_info(vif->dev, "Guest Rx ready");
2008 		netif_carrier_on(vif->dev);
2009 	}
2010 	spin_unlock(&vif->lock);
2011 }
2012 
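/* A queue is considered stalled if it is not already marked stalled, the
 * frontend has fewer than XEN_NETBK_RX_SLOTS_MAX unconsumed rx requests
 * posted, and it has not provided new slots within the stall timeout.
 */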
2013 static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
2014 {
2015 	RING_IDX prod, cons;
2016 
2017 	prod = queue->rx.sring->req_prod;
2018 	cons = queue->rx.req_cons;
2019 
2020 	return !queue->stalled
2021 		&& prod - cons < XEN_NETBK_RX_SLOTS_MAX
2022 		&& time_after(jiffies,
2023 			      queue->last_rx_time + rx_stall_timeout_jiffies);
2024 }
2025 
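/* A stalled queue becomes ready again once the frontend has posted at
 * least XEN_NETBK_RX_SLOTS_MAX unconsumed rx requests.
 */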
2026 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
2027 {
2028 	RING_IDX prod, cons;
2029 
2030 	prod = queue->rx.sring->req_prod;
2031 	cons = queue->rx.req_cons;
2032 
2033 	return queue->stalled
2034 		&& prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
2035 }
2036 
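/* Conditions that should wake the guest Rx kthread: queued packets with
 * enough ring slots to deliver them, a stall state change, a stop request,
 * or a disabled (rogue) frontend.
 */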
2037 static bool xenvif_have_rx_work(struct xenvif_queue *queue)
2038 {
2039 	return (!skb_queue_empty(&queue->rx_queue)
2040 		&& xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
2041 		|| xenvif_rx_queue_stalled(queue)
2042 		|| xenvif_rx_queue_ready(queue)
2043 		|| kthread_should_stop()
2044 		|| queue->vif->disabled;
2045 }
2046 
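/* How long the Rx kthread may sleep before the packet at the head of the
 * queue expires; infinite if the queue is empty.
 */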
2047 static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
2048 {
2049 	struct sk_buff *skb;
2050 	long timeout;
2051 
2052 	skb = skb_peek(&queue->rx_queue);
2053 	if (!skb)
2054 		return MAX_SCHEDULE_TIMEOUT;
2055 
2056 	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
2057 	return timeout < 0 ? 0 : timeout;
2058 }
2059 
2060 /* Wait until the guest Rx thread has work.
2061  *
2062  * The timeout needs to be adjusted based on the current head of the
2063  * queue (and not just the head at the beginning).  In particular, if
2064  * the queue is initially empty, an infinite timeout is used and this
2065  * needs to be reduced when an skb is queued.
2066  *
2067  * This cannot be done with wait_event_timeout() because it only
2068  * calculates the timeout once.
2069  */
2070 static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
2071 {
2072 	DEFINE_WAIT(wait);
2073 
2074 	if (xenvif_have_rx_work(queue))
2075 		return;
2076 
2077 	for (;;) {
2078 		long ret;
2079 
2080 		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
2081 		if (xenvif_have_rx_work(queue))
2082 			break;
2083 		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
2084 		if (!ret)
2085 			break;
2086 	}
2087 	finish_wait(&queue->wq, &wait);
2088 }
2089 
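/* Per-queue kthread that pushes queued packets to the guest, adjusts the
 * carrier on Rx stalls, drops expired packets, disables rogue frontends
 * (from queue 0 only) and purges the queue when the thread is stopped.
 */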
2090 int xenvif_kthread_guest_rx(void *data)
2091 {
2092 	struct xenvif_queue *queue = data;
2093 	struct xenvif *vif = queue->vif;
2094 
2095 	for (;;) {
2096 		xenvif_wait_for_rx_work(queue);
2097 
2098 		if (kthread_should_stop())
2099 			break;
2100 
2101 		/* This frontend has been found to be rogue; disable it from
2102 		 * kthread context. Currently this is only set when netback
2103 		 * finds that the frontend has sent a malformed packet, but
2104 		 * we cannot disable the interface in softirq context, so we
2105 		 * defer it here, provided this thread is associated with
2106 		 * queue 0.
2107 		 */
2108 		if (unlikely(vif->disabled && queue->id == 0)) {
2109 			xenvif_carrier_off(vif);
2110 			xenvif_rx_queue_purge(queue);
2111 			continue;
2112 		}
2113 
2114 		if (!skb_queue_empty(&queue->rx_queue))
2115 			xenvif_rx_action(queue);
2116 
2117 		/* If the guest hasn't provided any Rx slots for a
2118 		 * while, it's probably not responsive; drop the
2119 		 * carrier so packets are dropped earlier.
2120 		 */
2121 		if (xenvif_rx_queue_stalled(queue))
2122 			xenvif_queue_carrier_off(queue);
2123 		else if (xenvif_rx_queue_ready(queue))
2124 			xenvif_queue_carrier_on(queue);
2125 
2126 		/* Queued packets may have foreign pages from other
2127 		 * domains.  These cannot be queued indefinitely as
2128 		 * this would starve guests of grant refs and transmit
2129 		 * slots.
2130 		 */
2131 		xenvif_rx_queue_drop_expired(queue);
2132 
2133 		xenvif_rx_queue_maybe_wake(queue);
2134 
2135 		cond_resched();
2136 	}
2137 
2138 	/* Bin any remaining skbs */
2139 	xenvif_rx_queue_purge(queue);
2140 
2141 	return 0;
2142 }
2143 
2144 static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
2145 {
2146 	/* Dealloc thread must remain running until all inflight
2147 	 * packets complete.
2148 	 */
2149 	return kthread_should_stop() &&
2150 		!atomic_read(&queue->inflight_packets);
2151 }
2152 
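/* Per-queue kthread that unmaps grants for completed zerocopy skbs.  It
 * keeps running until all in-flight packets have completed and performs a
 * final dealloc pass before exiting.
 */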
2153 int xenvif_dealloc_kthread(void *data)
2154 {
2155 	struct xenvif_queue *queue = data;
2156 
2157 	for (;;) {
2158 		wait_event_interruptible(queue->dealloc_wq,
2159 					 tx_dealloc_work_todo(queue) ||
2160 					 xenvif_dealloc_kthread_should_stop(queue));
2161 		if (xenvif_dealloc_kthread_should_stop(queue))
2162 			break;
2163 
2164 		xenvif_tx_dealloc_action(queue);
2165 		cond_resched();
2166 	}
2167 
2168 	/* Unmap anything remaining */
2169 	if (tx_dealloc_work_todo(queue))
2170 		xenvif_tx_dealloc_action(queue);
2171 
2172 	return 0;
2173 }
2174 
2175 static int __init netback_init(void)
2176 {
2177 	int rc = 0;
2178 
2179 	if (!xen_domain())
2180 		return -ENODEV;
2181 
2182 	/* Allow as many queues as there are CPUs, by default */
2183 	xenvif_max_queues = num_online_cpus();
2184 
2185 	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
2186 		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
2187 			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
2188 		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
2189 	}
2190 
2191 	rc = xenvif_xenbus_init();
2192 	if (rc)
2193 		goto failed_init;
2194 
2195 	rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
2196 	rx_stall_timeout_jiffies = msecs_to_jiffies(rx_stall_timeout_msecs);
2197 
2198 #ifdef CONFIG_DEBUG_FS
2199 	xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
2200 	if (IS_ERR_OR_NULL(xen_netback_dbg_root))
2201 		pr_warn("Init of debugfs returned %ld!\n",
2202 			PTR_ERR(xen_netback_dbg_root));
2203 #endif /* CONFIG_DEBUG_FS */
2204 
2205 	return 0;
2206 
2207 failed_init:
2208 	return rc;
2209 }
2210 
2211 module_init(netback_init);
2212 
2213 static void __exit netback_fini(void)
2214 {
2215 #ifdef CONFIG_DEBUG_FS
2216 	if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
2217 		debugfs_remove_recursive(xen_netback_dbg_root);
2218 #endif /* CONFIG_DEBUG_FS */
2219 	xenvif_xenbus_fini();
2220 }
2221 module_exit(netback_fini);
2222 
2223 MODULE_LICENSE("Dual BSD/GPL");
2224 MODULE_ALIAS("xen-backend:vif");
2225