xref: /linux/drivers/net/xen-netback/netback.c (revision 89e47d3b8a273b0eac21e4bf6d7fdb86b654fa16)
1 /*
2  * Back-end of the driver for virtual network devices. This portion of the
3  * driver exports a 'unified' network-device interface that can be accessed
4  * by any operating system that implements a compatible front end. A
5  * reference front-end implementation can be found in:
6  *  drivers/net/xen-netfront.c
7  *
8  * Copyright (c) 2002-2005, K A Fraser
9  *
10  * This program is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU General Public License version 2
12  * as published by the Free Software Foundation; or, when distributed
13  * separately from the Linux kernel or incorporated into other
14  * software packages, subject to the following license:
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a copy
17  * of this source file (the "Software"), to deal in the Software without
18  * restriction, including without limitation the rights to use, copy, modify,
19  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20  * and to permit persons to whom the Software is furnished to do so, subject to
21  * the following conditions:
22  *
23  * The above copyright notice and this permission notice shall be included in
24  * all copies or substantial portions of the Software.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32  * IN THE SOFTWARE.
33  */
34 
35 #include "common.h"
36 
37 #include <linux/kthread.h>
38 #include <linux/if_vlan.h>
39 #include <linux/udp.h>
40 
41 #include <net/tcp.h>
42 #include <net/ip6_checksum.h>
43 
44 #include <xen/xen.h>
45 #include <xen/events.h>
46 #include <xen/interface/memory.h>
47 
48 #include <asm/xen/hypercall.h>
49 #include <asm/xen/page.h>
50 
51 /* Provide an option to disable split event channels at load time as
52  * event channels are a limited resource. Split event channels are
53  * enabled by default.
54  */
55 bool separate_tx_rx_irq = true;
56 module_param(separate_tx_rx_irq, bool, 0644);
57 
58 /*
59  * This is the maximum number of slots a skb can use. If a guest sends a skb
60  * which exceeds this limit, it is considered malicious.
61  */
62 #define FATAL_SKB_SLOTS_DEFAULT 20
63 static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
64 module_param(fatal_skb_slots, uint, 0444);
65 
66 /*
67  * To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
68  * the maximum number of slots a valid packet can use. This value is
69  * currently defined to be XEN_NETIF_NR_SLOTS_MIN, which is supposed to
70  * be supported by all backends.
71  */
72 #define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
73 
74 /*
75  * If head != INVALID_PENDING_RING_IDX, it means this tx request is the head
76  * of one or more merged tx requests; otherwise it is the continuation of a
77  * previous tx request.
78  */
79 static inline int pending_tx_is_head(struct xenvif *vif, RING_IDX idx)
80 {
81 	return vif->pending_tx_info[idx].head != INVALID_PENDING_RING_IDX;
82 }
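/* Concretely (a simplified illustration): when several tx requests are
 * coalesced into one page by xenvif_get_requests(), the pending slot that
 * starts the packet records the pending ring index at which it started,
 * while the slots merged into it keep head == INVALID_PENDING_RING_IDX.
 * xenvif_idx_release() later walks forward from a head slot until it
 * reaches the next head.
 */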
83 
84 static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
85 			       u8 status);
86 
87 static void make_tx_response(struct xenvif *vif,
88 			     struct xen_netif_tx_request *txp,
89 			     s8       st);
90 
91 static inline int tx_work_todo(struct xenvif *vif);
92 static inline int rx_work_todo(struct xenvif *vif);
93 
94 static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
95 					     u16      id,
96 					     s8       st,
97 					     u16      offset,
98 					     u16      size,
99 					     u16      flags);
100 
101 static inline unsigned long idx_to_pfn(struct xenvif *vif,
102 				       u16 idx)
103 {
104 	return page_to_pfn(vif->mmap_pages[idx]);
105 }
106 
107 static inline unsigned long idx_to_kaddr(struct xenvif *vif,
108 					 u16 idx)
109 {
110 	return (unsigned long)pfn_to_kaddr(idx_to_pfn(vif, idx));
111 }
112 
113 /* This is a minimum size for the linear area to avoid lots of
114  * calls to __pskb_pull_tail() as we set up checksum offsets. The
115  * value 128 was chosen as it covers all IPv4 and most likely
116  * IPv6 headers.
117  */
118 #define PKT_PROT_LEN 128
119 
120 static u16 frag_get_pending_idx(skb_frag_t *frag)
121 {
122 	return (u16)frag->page_offset;
123 }
124 
125 static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
126 {
127 	frag->page_offset = pending_idx;
128 }
129 
130 static inline pending_ring_idx_t pending_index(unsigned i)
131 {
132 	return i & (MAX_PENDING_REQS-1);
133 }
134 
135 static inline pending_ring_idx_t nr_pending_reqs(struct xenvif *vif)
136 {
137 	return MAX_PENDING_REQS -
138 		vif->pending_prod + vif->pending_cons;
139 }
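/* A worked example, assuming MAX_PENDING_REQS is a power of two (say 256),
 * which the mask in pending_index() relies on: pending_index(258) == 2, and
 * with pending_prod == 540 and pending_cons == 300, nr_pending_reqs()
 * evaluates to 256 - 540 + 300 == 16, i.e. roughly 16 pending tx slots are
 * currently in use.
 */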
140 
141 bool xenvif_rx_ring_slots_available(struct xenvif *vif, int needed)
142 {
143 	RING_IDX prod, cons;
144 
145 	do {
146 		prod = vif->rx.sring->req_prod;
147 		cons = vif->rx.req_cons;
148 
149 		if (prod - cons >= needed)
150 			return true;
151 
152 		vif->rx.sring->req_event = prod + 1;
153 
154 		/* Make sure event is visible before we check prod
155 		 * again.
156 		 */
157 		mb();
158 	} while (vif->rx.sring->req_prod != prod);
159 
160 	return false;
161 }
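/* A minimal usage sketch (illustrative only, never built): if req_prod == 10,
 * req_cons == 7 and five slots are needed, only three are available, so the
 * helper arms req_event at 11 and returns false; the frontend then sends an
 * event once it produces more requests, which wakes the rx kthread.
 */
#if 0
static void example_wait_for_rx_room(struct xenvif *vif, int needed)
{
	/* Sleep until the frontend has posted enough rx requests (the rx
	 * interrupt handler wakes vif->wq) or the kthread is asked to stop.
	 */
	wait_event_interruptible(vif->wq,
				 xenvif_rx_ring_slots_available(vif, needed) ||
				 kthread_should_stop());
}
#endif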
162 
163 /*
164  * Returns true if we should start a new receive buffer instead of
165  * adding 'size' bytes to a buffer which currently contains 'offset'
166  * bytes.
167  */
168 static bool start_new_rx_buffer(int offset, unsigned long size, int head)
169 {
170 	/* simple case: we have completely filled the current buffer. */
171 	if (offset == MAX_BUFFER_OFFSET)
172 		return true;
173 
174 	/*
175 	 * complex case: start a fresh buffer if the current frag
176 	 * would overflow the current buffer but only if:
177 	 *     (i)   this frag would fit completely in the next buffer
178 	 * and (ii)  there is already some data in the current buffer
179 	 * and (iii) this is not the head buffer.
180 	 *
181 	 * Where:
182 	 * - (i) stops us splitting a frag into two copies
183 	 *   unless the frag is too large for a single buffer.
184 	 * - (ii) stops us from leaving a buffer pointlessly empty.
185 	 * - (iii) stops us leaving the first buffer
186 	 *   empty. Strictly speaking this is already covered
187 	 *   by (ii) but is explicitly checked because
188 	 *   netfront relies on the first buffer being
189 	 *   non-empty and can crash otherwise.
190 	 *
191 	 * This means we will effectively linearise small
192 	 * frags, but do not needlessly split large buffers
193 	 * into multiple copies, tending to give large frags
194 	 * their own buffers as before.
195 	 */
196 	if ((offset + size > MAX_BUFFER_OFFSET) &&
197 	    (size <= MAX_BUFFER_OFFSET) && offset && !head)
198 		return true;
199 
200 	return false;
201 }
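/* A few worked cases for the rules above (illustrative only, never built),
 * assuming MAX_BUFFER_OFFSET is PAGE_SIZE (4096).
 */
#if 0
static void start_new_rx_buffer_examples(void)
{
	/* Buffer completely full: always start a new one. */
	BUG_ON(!start_new_rx_buffer(4096, 100, 0));

	/* Frag would overflow, fits in a fresh buffer, current buffer is
	 * non-empty and is not the head: start a new buffer instead of
	 * splitting the frag.
	 */
	BUG_ON(!start_new_rx_buffer(3000, 1500, 0));

	/* Same frag, but this is the head buffer: keep filling it (the
	 * frag is split) so netfront sees a non-empty first buffer.
	 */
	BUG_ON(start_new_rx_buffer(3000, 1500, 1));
}
#endif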
202 
203 struct netrx_pending_operations {
204 	unsigned copy_prod, copy_cons;
205 	unsigned meta_prod, meta_cons;
206 	struct gnttab_copy *copy;
207 	struct xenvif_rx_meta *meta;
208 	int copy_off;
209 	grant_ref_t copy_gref;
210 };
211 
212 static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif *vif,
213 						 struct netrx_pending_operations *npo)
214 {
215 	struct xenvif_rx_meta *meta;
216 	struct xen_netif_rx_request *req;
217 
218 	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
219 
220 	meta = npo->meta + npo->meta_prod++;
221 	meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
222 	meta->gso_size = 0;
223 	meta->size = 0;
224 	meta->id = req->id;
225 
226 	npo->copy_off = 0;
227 	npo->copy_gref = req->gref;
228 
229 	return meta;
230 }
231 
232 /*
233  * Set up the grant operations for this fragment. If it's a flipping
234  * interface, we also set up the unmap request from here.
235  */
236 static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
237 				 struct netrx_pending_operations *npo,
238 				 struct page *page, unsigned long size,
239 				 unsigned long offset, int *head)
240 {
241 	struct gnttab_copy *copy_gop;
242 	struct xenvif_rx_meta *meta;
243 	unsigned long bytes;
244 	int gso_type;
245 
246 	/* Data must not cross a page boundary. */
247 	BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
248 
249 	meta = npo->meta + npo->meta_prod - 1;
250 
251 	/* Skip unused frames from start of page */
252 	page += offset >> PAGE_SHIFT;
253 	offset &= ~PAGE_MASK;
254 
255 	while (size > 0) {
256 		BUG_ON(offset >= PAGE_SIZE);
257 		BUG_ON(npo->copy_off > MAX_BUFFER_OFFSET);
258 
259 		bytes = PAGE_SIZE - offset;
260 
261 		if (bytes > size)
262 			bytes = size;
263 
264 		if (start_new_rx_buffer(npo->copy_off, bytes, *head)) {
265 			/*
266 			 * Netfront requires there to be some data in the head
267 			 * buffer.
268 			 */
269 			BUG_ON(*head);
270 
271 			meta = get_next_rx_buffer(vif, npo);
272 		}
273 
274 		if (npo->copy_off + bytes > MAX_BUFFER_OFFSET)
275 			bytes = MAX_BUFFER_OFFSET - npo->copy_off;
276 
277 		copy_gop = npo->copy + npo->copy_prod++;
278 		copy_gop->flags = GNTCOPY_dest_gref;
279 		copy_gop->len = bytes;
280 
281 		copy_gop->source.domid = DOMID_SELF;
282 		copy_gop->source.u.gmfn = virt_to_mfn(page_address(page));
283 		copy_gop->source.offset = offset;
284 
285 		copy_gop->dest.domid = vif->domid;
286 		copy_gop->dest.offset = npo->copy_off;
287 		copy_gop->dest.u.ref = npo->copy_gref;
288 
289 		npo->copy_off += bytes;
290 		meta->size += bytes;
291 
292 		offset += bytes;
293 		size -= bytes;
294 
295 		/* Next frame */
296 		if (offset == PAGE_SIZE && size) {
297 			BUG_ON(!PageCompound(page));
298 			page++;
299 			offset = 0;
300 		}
301 
302 		/* Leave a gap for the GSO descriptor. */
303 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
304 			gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
305 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
306 			gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
307 		else
308 			gso_type = XEN_NETIF_GSO_TYPE_NONE;
309 
310 		if (*head && ((1 << gso_type) & vif->gso_mask))
311 			vif->rx.req_cons++;
312 
313 		*head = 0; /* There must be something in this buffer now. */
314 
315 	}
316 }
317 
318 /*
319  * Prepare an SKB to be transmitted to the frontend.
320  *
321  * This function is responsible for allocating grant operations, meta
322  * structures, etc.
323  *
324  * It returns the number of meta structures consumed. The number of
325  * ring slots used is always equal to the number of meta slots used
326  * plus the number of GSO descriptors used. Currently, we use either
327  * zero GSO descriptors (for non-GSO packets) or one descriptor (for
328  * frontend-side LRO).
329  */
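/* For example: a non-GSO skb whose linear data and two frags each land in
 * their own buffer consumes three meta slots and three ring slots, while a
 * TCPv4 GSO skb with the same layout consumes the same three meta slots plus
 * one extra ring slot for the GSO extra-info descriptor (reserved by
 * advancing rx.req_cons once more in xenvif_gop_frag_copy()).
 */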
330 static int xenvif_gop_skb(struct sk_buff *skb,
331 			  struct netrx_pending_operations *npo)
332 {
333 	struct xenvif *vif = netdev_priv(skb->dev);
334 	int nr_frags = skb_shinfo(skb)->nr_frags;
335 	int i;
336 	struct xen_netif_rx_request *req;
337 	struct xenvif_rx_meta *meta;
338 	unsigned char *data;
339 	int head = 1;
340 	int old_meta_prod;
341 	int gso_type;
342 	int gso_size;
343 
344 	old_meta_prod = npo->meta_prod;
345 
346 	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
347 		gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
348 		gso_size = skb_shinfo(skb)->gso_size;
349 	} else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
350 		gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
351 		gso_size = skb_shinfo(skb)->gso_size;
352 	} else {
353 		gso_type = XEN_NETIF_GSO_TYPE_NONE;
354 		gso_size = 0;
355 	}
356 
357 	/* Set up a GSO prefix descriptor, if necessary */
358 	if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) {
359 		req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
360 		meta = npo->meta + npo->meta_prod++;
361 		meta->gso_type = gso_type;
362 		meta->gso_size = gso_size;
363 		meta->size = 0;
364 		meta->id = req->id;
365 	}
366 
367 	req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
368 	meta = npo->meta + npo->meta_prod++;
369 
370 	if ((1 << gso_type) & vif->gso_mask) {
371 		meta->gso_type = gso_type;
372 		meta->gso_size = gso_size;
373 	} else {
374 		meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
375 		meta->gso_size = 0;
376 	}
377 
378 	meta->size = 0;
379 	meta->id = req->id;
380 	npo->copy_off = 0;
381 	npo->copy_gref = req->gref;
382 
383 	data = skb->data;
384 	while (data < skb_tail_pointer(skb)) {
385 		unsigned int offset = offset_in_page(data);
386 		unsigned int len = PAGE_SIZE - offset;
387 
388 		if (data + len > skb_tail_pointer(skb))
389 			len = skb_tail_pointer(skb) - data;
390 
391 		xenvif_gop_frag_copy(vif, skb, npo,
392 				     virt_to_page(data), len, offset, &head);
393 		data += len;
394 	}
395 
396 	for (i = 0; i < nr_frags; i++) {
397 		xenvif_gop_frag_copy(vif, skb, npo,
398 				     skb_frag_page(&skb_shinfo(skb)->frags[i]),
399 				     skb_frag_size(&skb_shinfo(skb)->frags[i]),
400 				     skb_shinfo(skb)->frags[i].page_offset,
401 				     &head);
402 	}
403 
404 	return npo->meta_prod - old_meta_prod;
405 }
406 
407 /*
408  * This is a twin to xenvif_gop_skb.  Assume that xenvif_gop_skb was
409  * used to set up the operations at the top of
410  * netrx_pending_operations, which have since been done.  Check that
411  * they didn't give any errors and advance over them.
412  */
413 static int xenvif_check_gop(struct xenvif *vif, int nr_meta_slots,
414 			    struct netrx_pending_operations *npo)
415 {
416 	struct gnttab_copy     *copy_op;
417 	int status = XEN_NETIF_RSP_OKAY;
418 	int i;
419 
420 	for (i = 0; i < nr_meta_slots; i++) {
421 		copy_op = npo->copy + npo->copy_cons++;
422 		if (copy_op->status != GNTST_okay) {
423 			netdev_dbg(vif->dev,
424 				   "Bad status %d from copy to DOM%d.\n",
425 				   copy_op->status, vif->domid);
426 			status = XEN_NETIF_RSP_ERROR;
427 		}
428 	}
429 
430 	return status;
431 }
432 
433 static void xenvif_add_frag_responses(struct xenvif *vif, int status,
434 				      struct xenvif_rx_meta *meta,
435 				      int nr_meta_slots)
436 {
437 	int i;
438 	unsigned long offset;
439 
440 	/* No fragments used */
441 	if (nr_meta_slots <= 1)
442 		return;
443 
444 	nr_meta_slots--;
445 
446 	for (i = 0; i < nr_meta_slots; i++) {
447 		int flags;
448 		if (i == nr_meta_slots - 1)
449 			flags = 0;
450 		else
451 			flags = XEN_NETRXF_more_data;
452 
453 		offset = 0;
454 		make_rx_response(vif, meta[i].id, status, offset,
455 				 meta[i].size, flags);
456 	}
457 }
458 
459 struct skb_cb_overlay {
460 	int meta_slots_used;
461 };
462 
463 void xenvif_kick_thread(struct xenvif *vif)
464 {
465 	wake_up(&vif->wq);
466 }
467 
468 static void xenvif_rx_action(struct xenvif *vif)
469 {
470 	s8 status;
471 	u16 flags;
472 	struct xen_netif_rx_response *resp;
473 	struct sk_buff_head rxq;
474 	struct sk_buff *skb;
475 	LIST_HEAD(notify);
476 	int ret;
477 	unsigned long offset;
478 	struct skb_cb_overlay *sco;
479 	int need_to_notify = 0;
480 
481 	struct netrx_pending_operations npo = {
482 		.copy  = vif->grant_copy_op,
483 		.meta  = vif->meta,
484 	};
485 
486 	skb_queue_head_init(&rxq);
487 
488 	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
489 		int max_slots_needed;
490 		int i;
491 
492 		/* We need a cheap worst-case estimate for the number of
493 		 * slots we'll use.
494 		 */
495 
496 		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
497 						skb_headlen(skb),
498 						PAGE_SIZE);
499 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
500 			unsigned int size;
501 			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
502 			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
503 		}
504 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
505 		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
506 			max_slots_needed++;
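		/* For example (assuming 4096-byte pages): 2000 bytes of
		 * linear data starting at page offset 3000 needs
		 * DIV_ROUND_UP(3000 + 2000, 4096) == 2 slots, a single
		 * 7000-byte frag adds DIV_ROUND_UP(7000, 4096) == 2 more,
		 * and a GSO skb reserves one extra slot, i.e. 5 in total.
		 */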
507 
508 		/* If the skb may not fit then bail out now */
509 		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
510 			skb_queue_head(&vif->rx_queue, skb);
511 			need_to_notify = 1;
512 			break;
513 		}
514 
515 		sco = (struct skb_cb_overlay *)skb->cb;
516 		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
517 		BUG_ON(sco->meta_slots_used > max_slots_needed);
518 
519 		__skb_queue_tail(&rxq, skb);
520 	}
521 
522 	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
523 
524 	if (!npo.copy_prod)
525 		goto done;
526 
527 	BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
528 	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
529 
530 	while ((skb = __skb_dequeue(&rxq)) != NULL) {
531 		sco = (struct skb_cb_overlay *)skb->cb;
532 
533 		if ((1 << vif->meta[npo.meta_cons].gso_type) &
534 		    vif->gso_prefix_mask) {
535 			resp = RING_GET_RESPONSE(&vif->rx,
536 						 vif->rx.rsp_prod_pvt++);
537 
538 			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;
539 
540 			resp->offset = vif->meta[npo.meta_cons].gso_size;
541 			resp->id = vif->meta[npo.meta_cons].id;
542 			resp->status = sco->meta_slots_used;
543 
544 			npo.meta_cons++;
545 			sco->meta_slots_used--;
546 		}
547 
548 
549 		vif->dev->stats.tx_bytes += skb->len;
550 		vif->dev->stats.tx_packets++;
551 
552 		status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);
553 
554 		if (sco->meta_slots_used == 1)
555 			flags = 0;
556 		else
557 			flags = XEN_NETRXF_more_data;
558 
559 		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
560 			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
561 		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
562 			/* remote but checksummed. */
563 			flags |= XEN_NETRXF_data_validated;
564 
565 		offset = 0;
566 		resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
567 					status, offset,
568 					vif->meta[npo.meta_cons].size,
569 					flags);
570 
571 		if ((1 << vif->meta[npo.meta_cons].gso_type) &
572 		    vif->gso_mask) {
573 			struct xen_netif_extra_info *gso =
574 				(struct xen_netif_extra_info *)
575 				RING_GET_RESPONSE(&vif->rx,
576 						  vif->rx.rsp_prod_pvt++);
577 
578 			resp->flags |= XEN_NETRXF_extra_info;
579 
580 			gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
581 			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
582 			gso->u.gso.pad = 0;
583 			gso->u.gso.features = 0;
584 
585 			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
586 			gso->flags = 0;
587 		}
588 
589 		xenvif_add_frag_responses(vif, status,
590 					  vif->meta + npo.meta_cons + 1,
591 					  sco->meta_slots_used);
592 
593 		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
594 
595 		if (ret)
596 			need_to_notify = 1;
597 
598 		npo.meta_cons += sco->meta_slots_used;
599 		dev_kfree_skb(skb);
600 	}
601 
602 done:
603 	if (need_to_notify)
604 		notify_remote_via_irq(vif->rx_irq);
605 }
606 
607 void xenvif_check_rx_xenvif(struct xenvif *vif)
608 {
609 	int more_to_do;
610 
611 	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);
612 
613 	if (more_to_do)
614 		napi_schedule(&vif->napi);
615 }
616 
617 static void tx_add_credit(struct xenvif *vif)
618 {
619 	unsigned long max_burst, max_credit;
620 
621 	/*
622 	 * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
623 	 * Otherwise the interface can seize up due to insufficient credit.
624 	 */
625 	max_burst = RING_GET_REQUEST(&vif->tx, vif->tx.req_cons)->size;
626 	max_burst = min(max_burst, 131072UL);
627 	max_burst = max(max_burst, vif->credit_bytes);
628 
629 	/* Take care that adding a new chunk of credit doesn't wrap to zero. */
630 	max_credit = vif->remaining_credit + vif->credit_bytes;
631 	if (max_credit < vif->remaining_credit)
632 		max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
633 
634 	vif->remaining_credit = min(max_credit, max_burst);
635 }
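/* A worked example: with credit_bytes == 100000, remaining_credit == 20000
 * and a 60000-byte request at the head of the ring, max_burst becomes
 * max(min(60000, 131072), 100000) == 100000 and max_credit == 120000, so
 * remaining_credit is topped up to 100000.
 */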
636 
637 static void tx_credit_callback(unsigned long data)
638 {
639 	struct xenvif *vif = (struct xenvif *)data;
640 	tx_add_credit(vif);
641 	xenvif_check_rx_xenvif(vif);
642 }
643 
644 static void xenvif_tx_err(struct xenvif *vif,
645 			  struct xen_netif_tx_request *txp, RING_IDX end)
646 {
647 	RING_IDX cons = vif->tx.req_cons;
648 
649 	do {
650 		make_tx_response(vif, txp, XEN_NETIF_RSP_ERROR);
651 		if (cons == end)
652 			break;
653 		txp = RING_GET_REQUEST(&vif->tx, cons++);
654 	} while (1);
655 	vif->tx.req_cons = cons;
656 }
657 
658 static void xenvif_fatal_tx_err(struct xenvif *vif)
659 {
660 	netdev_err(vif->dev, "fatal error; disabling device\n");
661 	xenvif_carrier_off(vif);
662 }
663 
664 static int xenvif_count_requests(struct xenvif *vif,
665 				 struct xen_netif_tx_request *first,
666 				 struct xen_netif_tx_request *txp,
667 				 int work_to_do)
668 {
669 	RING_IDX cons = vif->tx.req_cons;
670 	int slots = 0;
671 	int drop_err = 0;
672 	int more_data;
673 
674 	if (!(first->flags & XEN_NETTXF_more_data))
675 		return 0;
676 
677 	do {
678 		struct xen_netif_tx_request dropped_tx = { 0 };
679 
680 		if (slots >= work_to_do) {
681 			netdev_err(vif->dev,
682 				   "Asked for %d slots but exceeds this limit\n",
683 				   work_to_do);
684 			xenvif_fatal_tx_err(vif);
685 			return -ENODATA;
686 		}
687 
688 		/* This guest is really using too many slots and is
689 		 * considered malicious.
690 		 */
691 		if (unlikely(slots >= fatal_skb_slots)) {
692 			netdev_err(vif->dev,
693 				   "Malicious frontend using %d slots, threshold %u\n",
694 				   slots, fatal_skb_slots);
695 			xenvif_fatal_tx_err(vif);
696 			return -E2BIG;
697 		}
698 
699 		/* The Xen network protocol had an implicit dependency on
700 		 * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
701 		 * the historical MAX_SKB_FRAGS value 18 to honor the
702 		 * same behavior as before. Any packet using more than
703 		 * 18 slots but fewer than fatal_skb_slots slots is
704 		 * dropped.
705 		 */
706 		if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
707 			if (net_ratelimit())
708 				netdev_dbg(vif->dev,
709 					   "Too many slots (%d) exceeding limit (%d), dropping packet\n",
710 					   slots, XEN_NETBK_LEGACY_SLOTS_MAX);
711 			drop_err = -E2BIG;
712 		}
713 
714 		if (drop_err)
715 			txp = &dropped_tx;
716 
717 		memcpy(txp, RING_GET_REQUEST(&vif->tx, cons + slots),
718 		       sizeof(*txp));
719 
720 		/* If the guest submitted a frame >= 64 KiB then
721 		 * first->size overflowed and following slots will
722 		 * appear to be larger than the frame.
723 		 *
724 		 * This cannot be a fatal error as there are buggy
725 		 * frontends that do this.
726 		 *
727 		 * Consume all slots and drop the packet.
728 		 */
729 		if (!drop_err && txp->size > first->size) {
730 			if (net_ratelimit())
731 				netdev_dbg(vif->dev,
732 					   "Invalid tx request, slot size %u > remaining size %u\n",
733 					   txp->size, first->size);
734 			drop_err = -EIO;
735 		}
736 
737 		first->size -= txp->size;
738 		slots++;
739 
740 		if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
741 			netdev_err(vif->dev, "Cross page boundary, txp->offset: %x, size: %u\n",
742 				 txp->offset, txp->size);
743 			xenvif_fatal_tx_err(vif);
744 			return -EINVAL;
745 		}
746 
747 		more_data = txp->flags & XEN_NETTXF_more_data;
748 
749 		if (!drop_err)
750 			txp++;
751 
752 	} while (more_data);
753 
754 	if (drop_err) {
755 		xenvif_tx_err(vif, first, cons + slots);
756 		return drop_err;
757 	}
758 
759 	return slots;
760 }
761 
762 static struct page *xenvif_alloc_page(struct xenvif *vif,
763 				      u16 pending_idx)
764 {
765 	struct page *page;
766 
767 	page = alloc_page(GFP_ATOMIC|__GFP_COLD);
768 	if (!page)
769 		return NULL;
770 	vif->mmap_pages[pending_idx] = page;
771 
772 	return page;
773 }
774 
775 static struct gnttab_copy *xenvif_get_requests(struct xenvif *vif,
776 					       struct sk_buff *skb,
777 					       struct xen_netif_tx_request *txp,
778 					       struct gnttab_copy *gop)
779 {
780 	struct skb_shared_info *shinfo = skb_shinfo(skb);
781 	skb_frag_t *frags = shinfo->frags;
782 	u16 pending_idx = *((u16 *)skb->data);
783 	u16 head_idx = 0;
784 	int slot, start;
785 	struct page *page;
786 	pending_ring_idx_t index, start_idx = 0;
787 	uint16_t dst_offset;
788 	unsigned int nr_slots;
789 	struct pending_tx_info *first = NULL;
790 
791 	/* At this point shinfo->nr_frags is in fact the number of
792 	 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
793 	 */
794 	nr_slots = shinfo->nr_frags;
795 
796 	/* Skip first skb fragment if it is on same page as header fragment. */
797 	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
798 
799 	/* Coalesce tx requests; at this point the packet passed in
800 	 * should be <= 64K. Any packets larger than 64K have been
801 	 * handled in xenvif_count_requests().
802 	 */
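	/* For example, three 1000-byte tx requests that fit in one page end
	 * up as a single frag: the loop below copies them back to back, the
	 * head pending_tx_info gets req.offset == 0 and req.size == 3000,
	 * and the frag records the head's pending index.
	 */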
803 	for (shinfo->nr_frags = slot = start; slot < nr_slots;
804 	     shinfo->nr_frags++) {
805 		struct pending_tx_info *pending_tx_info =
806 			vif->pending_tx_info;
807 
808 		page = alloc_page(GFP_ATOMIC|__GFP_COLD);
809 		if (!page)
810 			goto err;
811 
812 		dst_offset = 0;
813 		first = NULL;
814 		while (dst_offset < PAGE_SIZE && slot < nr_slots) {
815 			gop->flags = GNTCOPY_source_gref;
816 
817 			gop->source.u.ref = txp->gref;
818 			gop->source.domid = vif->domid;
819 			gop->source.offset = txp->offset;
820 
821 			gop->dest.domid = DOMID_SELF;
822 
823 			gop->dest.offset = dst_offset;
824 			gop->dest.u.gmfn = virt_to_mfn(page_address(page));
825 
826 			if (dst_offset + txp->size > PAGE_SIZE) {
827 				/* This page can only merge a portion
828 				 * of the tx request. Do not increment any
829 				 * pointer / counter here. The txp
830 				 * will be dealt with in future
831 				 * rounds, eventually hitting the
832 				 * `else` branch.
833 				 */
834 				gop->len = PAGE_SIZE - dst_offset;
835 				txp->offset += gop->len;
836 				txp->size -= gop->len;
837 				dst_offset += gop->len; /* quit loop */
838 			} else {
839 				/* This tx request can be merged in the page */
840 				gop->len = txp->size;
841 				dst_offset += gop->len;
842 
843 				index = pending_index(vif->pending_cons++);
844 
845 				pending_idx = vif->pending_ring[index];
846 
847 				memcpy(&pending_tx_info[pending_idx].req, txp,
848 				       sizeof(*txp));
849 
850 				/* Poison these fields; the corresponding
851 				 * fields for the head tx req will be set
852 				 * to correct values after the loop.
853 				 */
854 				vif->mmap_pages[pending_idx] = (void *)(~0UL);
855 				pending_tx_info[pending_idx].head =
856 					INVALID_PENDING_RING_IDX;
857 
858 				if (!first) {
859 					first = &pending_tx_info[pending_idx];
860 					start_idx = index;
861 					head_idx = pending_idx;
862 				}
863 
864 				txp++;
865 				slot++;
866 			}
867 
868 			gop++;
869 		}
870 
871 		first->req.offset = 0;
872 		first->req.size = dst_offset;
873 		first->head = start_idx;
874 		vif->mmap_pages[head_idx] = page;
875 		frag_set_pending_idx(&frags[shinfo->nr_frags], head_idx);
876 	}
877 
878 	BUG_ON(shinfo->nr_frags > MAX_SKB_FRAGS);
879 
880 	return gop;
881 err:
882 	/* Unwind, freeing all pages and sending error responses. */
883 	while (shinfo->nr_frags-- > start) {
884 		xenvif_idx_release(vif,
885 				frag_get_pending_idx(&frags[shinfo->nr_frags]),
886 				XEN_NETIF_RSP_ERROR);
887 	}
888 	/* The head too, if necessary. */
889 	if (start)
890 		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
891 
892 	return NULL;
893 }
894 
895 static int xenvif_tx_check_gop(struct xenvif *vif,
896 			       struct sk_buff *skb,
897 			       struct gnttab_copy **gopp)
898 {
899 	struct gnttab_copy *gop = *gopp;
900 	u16 pending_idx = *((u16 *)skb->data);
901 	struct skb_shared_info *shinfo = skb_shinfo(skb);
902 	struct pending_tx_info *tx_info;
903 	int nr_frags = shinfo->nr_frags;
904 	int i, err, start;
905 	u16 peek; /* peek into next tx request */
906 
907 	/* Check status of header. */
908 	err = gop->status;
909 	if (unlikely(err))
910 		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
911 
912 	/* Skip first skb fragment if it is on same page as header fragment. */
913 	start = (frag_get_pending_idx(&shinfo->frags[0]) == pending_idx);
914 
915 	for (i = start; i < nr_frags; i++) {
916 		int j, newerr;
917 		pending_ring_idx_t head;
918 
919 		pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
920 		tx_info = &vif->pending_tx_info[pending_idx];
921 		head = tx_info->head;
922 
923 		/* Check the error status of every copy op making up this frag. */
924 		do {
925 			newerr = (++gop)->status;
926 			if (newerr)
927 				break;
928 			peek = vif->pending_ring[pending_index(++head)];
929 		} while (!pending_tx_is_head(vif, peek));
930 
931 		if (likely(!newerr)) {
932 			/* Had a previous error? Invalidate this fragment. */
933 			if (unlikely(err))
934 				xenvif_idx_release(vif, pending_idx,
935 						   XEN_NETIF_RSP_OKAY);
936 			continue;
937 		}
938 
939 		/* Error on this fragment: respond to client with an error. */
940 		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_ERROR);
941 
942 		/* Not the first error? Preceding frags already invalidated. */
943 		if (err)
944 			continue;
945 
946 		/* First error: invalidate header and preceding fragments. */
947 		pending_idx = *((u16 *)skb->data);
948 		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
949 		for (j = start; j < i; j++) {
950 			pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
951 			xenvif_idx_release(vif, pending_idx,
952 					   XEN_NETIF_RSP_OKAY);
953 		}
954 
955 		/* Remember the error: invalidate all subsequent fragments. */
956 		err = newerr;
957 	}
958 
959 	*gopp = gop + 1;
960 	return err;
961 }
962 
963 static void xenvif_fill_frags(struct xenvif *vif, struct sk_buff *skb)
964 {
965 	struct skb_shared_info *shinfo = skb_shinfo(skb);
966 	int nr_frags = shinfo->nr_frags;
967 	int i;
968 
969 	for (i = 0; i < nr_frags; i++) {
970 		skb_frag_t *frag = shinfo->frags + i;
971 		struct xen_netif_tx_request *txp;
972 		struct page *page;
973 		u16 pending_idx;
974 
975 		pending_idx = frag_get_pending_idx(frag);
976 
977 		txp = &vif->pending_tx_info[pending_idx].req;
978 		page = virt_to_page(idx_to_kaddr(vif, pending_idx));
979 		__skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
980 		skb->len += txp->size;
981 		skb->data_len += txp->size;
982 		skb->truesize += txp->size;
983 
984 		/* Take an extra reference to offset xenvif_idx_release */
985 		get_page(vif->mmap_pages[pending_idx]);
986 		xenvif_idx_release(vif, pending_idx, XEN_NETIF_RSP_OKAY);
987 	}
988 }
989 
990 static int xenvif_get_extras(struct xenvif *vif,
991 				struct xen_netif_extra_info *extras,
992 				int work_to_do)
993 {
994 	struct xen_netif_extra_info extra;
995 	RING_IDX cons = vif->tx.req_cons;
996 
997 	do {
998 		if (unlikely(work_to_do-- <= 0)) {
999 			netdev_err(vif->dev, "Missing extra info\n");
1000 			xenvif_fatal_tx_err(vif);
1001 			return -EBADR;
1002 		}
1003 
1004 		memcpy(&extra, RING_GET_REQUEST(&vif->tx, cons),
1005 		       sizeof(extra));
1006 		if (unlikely(!extra.type ||
1007 			     extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1008 			vif->tx.req_cons = ++cons;
1009 			netdev_err(vif->dev,
1010 				   "Invalid extra type: %d\n", extra.type);
1011 			xenvif_fatal_tx_err(vif);
1012 			return -EINVAL;
1013 		}
1014 
1015 		memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
1016 		vif->tx.req_cons = ++cons;
1017 	} while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
1018 
1019 	return work_to_do;
1020 }
1021 
1022 static int xenvif_set_skb_gso(struct xenvif *vif,
1023 			      struct sk_buff *skb,
1024 			      struct xen_netif_extra_info *gso)
1025 {
1026 	if (!gso->u.gso.size) {
1027 		netdev_err(vif->dev, "GSO size must not be zero.\n");
1028 		xenvif_fatal_tx_err(vif);
1029 		return -EINVAL;
1030 	}
1031 
1032 	switch (gso->u.gso.type) {
1033 	case XEN_NETIF_GSO_TYPE_TCPV4:
1034 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1035 		break;
1036 	case XEN_NETIF_GSO_TYPE_TCPV6:
1037 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1038 		break;
1039 	default:
1040 		netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
1041 		xenvif_fatal_tx_err(vif);
1042 		return -EINVAL;
1043 	}
1044 
1045 	skb_shinfo(skb)->gso_size = gso->u.gso.size;
1046 
1047 	/* Header must be checked, and gso_segs computed. */
1048 	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1049 	skb_shinfo(skb)->gso_segs = 0;
1050 
1051 	return 0;
1052 }
1053 
1054 static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len,
1055 				  unsigned int max)
1056 {
1057 	if (skb_headlen(skb) >= len)
1058 		return 0;
1059 
1060 	/* If we need to pull up then pull up to the max, so we
1061 	 * won't need to do it again.
1062 	 */
1063 	if (max > skb->len)
1064 		max = skb->len;
1065 
1066 	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
1067 		return -ENOMEM;
1068 
1069 	if (skb_headlen(skb) < len)
1070 		return -EPROTO;
1071 
1072 	return 0;
1073 }
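/* For example, the IPv4 path below calls
 * maybe_pull_tail(skb, sizeof(struct iphdr), MAX_IP_HDR_LEN): because the
 * pull goes up to MAX_IP_HDR_LEN in one go, the later pulls for the TCP or
 * UDP header usually find the data already linear and return immediately.
 */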
1074 
1075 /* This value should be large enough to cover a tagged ethernet header plus
1076  * maximally sized IP and TCP or UDP headers.
1077  */
1078 #define MAX_IP_HDR_LEN 128
1079 
1080 static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
1081 			     int recalculate_partial_csum)
1082 {
1083 	unsigned int off;
1084 	bool fragment;
1085 	int err;
1086 
1087 	fragment = false;
1088 
1089 	err = maybe_pull_tail(skb,
1090 			      sizeof(struct iphdr),
1091 			      MAX_IP_HDR_LEN);
1092 	if (err < 0)
1093 		goto out;
1094 
1095 	if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
1096 		fragment = true;
1097 
1098 	off = ip_hdrlen(skb);
1099 
1100 	err = -EPROTO;
1101 
1102 	switch (ip_hdr(skb)->protocol) {
1103 	case IPPROTO_TCP:
1104 		if (!skb_partial_csum_set(skb, off,
1105 					  offsetof(struct tcphdr, check)))
1106 			goto out;
1107 
1108 		if (recalculate_partial_csum) {
1109 			err = maybe_pull_tail(skb,
1110 					      off + sizeof(struct tcphdr),
1111 					      MAX_IP_HDR_LEN);
1112 			if (err < 0)
1113 				goto out;
1114 
1115 			tcp_hdr(skb)->check =
1116 				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1117 						   ip_hdr(skb)->daddr,
1118 						   skb->len - off,
1119 						   IPPROTO_TCP, 0);
1120 		}
1121 		break;
1122 	case IPPROTO_UDP:
1123 		if (!skb_partial_csum_set(skb, off,
1124 					  offsetof(struct udphdr, check)))
1125 			goto out;
1126 
1127 		if (recalculate_partial_csum) {
1128 			err = maybe_pull_tail(skb,
1129 					      off + sizeof(struct udphdr),
1130 					      MAX_IP_HDR_LEN);
1131 			if (err < 0)
1132 				goto out;
1133 
1134 			udp_hdr(skb)->check =
1135 				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
1136 						   ip_hdr(skb)->daddr,
1137 						   skb->len - off,
1138 						   IPPROTO_UDP, 0);
1139 		}
1140 		break;
1141 	default:
1142 		goto out;
1143 	}
1144 
1145 	err = 0;
1146 
1147 out:
1148 	return err;
1149 }
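/* Note on the checksum handling above: for CHECKSUM_PARTIAL skbs the check
 * field is seeded with the complement of the pseudo-header sum
 * (~csum_tcpudp_magic(...)), leaving the sum over the payload to be filled
 * in later by the stack or by the transmitting device.
 */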
1150 
1151 /* This value should be large enough to cover a tagged ethernet header plus
1152  * an IPv6 header, all options, and a maximal TCP or UDP header.
1153  */
1154 #define MAX_IPV6_HDR_LEN 256
1155 
1156 #define OPT_HDR(type, skb, off) \
1157 	(type *)(skb_network_header(skb) + (off))
1158 
1159 static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
1160 			       int recalculate_partial_csum)
1161 {
1162 	int err;
1163 	u8 nexthdr;
1164 	unsigned int off;
1165 	unsigned int len;
1166 	bool fragment;
1167 	bool done;
1168 
1169 	fragment = false;
1170 	done = false;
1171 
1172 	off = sizeof(struct ipv6hdr);
1173 
1174 	err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
1175 	if (err < 0)
1176 		goto out;
1177 
1178 	nexthdr = ipv6_hdr(skb)->nexthdr;
1179 
1180 	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
1181 	while (off <= len && !done) {
1182 		switch (nexthdr) {
1183 		case IPPROTO_DSTOPTS:
1184 		case IPPROTO_HOPOPTS:
1185 		case IPPROTO_ROUTING: {
1186 			struct ipv6_opt_hdr *hp;
1187 
1188 			err = maybe_pull_tail(skb,
1189 					      off +
1190 					      sizeof(struct ipv6_opt_hdr),
1191 					      MAX_IPV6_HDR_LEN);
1192 			if (err < 0)
1193 				goto out;
1194 
1195 			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
1196 			nexthdr = hp->nexthdr;
1197 			off += ipv6_optlen(hp);
1198 			break;
1199 		}
1200 		case IPPROTO_AH: {
1201 			struct ip_auth_hdr *hp;
1202 
1203 			err = maybe_pull_tail(skb,
1204 					      off +
1205 					      sizeof(struct ip_auth_hdr),
1206 					      MAX_IPV6_HDR_LEN);
1207 			if (err < 0)
1208 				goto out;
1209 
1210 			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
1211 			nexthdr = hp->nexthdr;
1212 			off += ipv6_authlen(hp);
1213 			break;
1214 		}
1215 		case IPPROTO_FRAGMENT: {
1216 			struct frag_hdr *hp;
1217 
1218 			err = maybe_pull_tail(skb,
1219 					      off +
1220 					      sizeof(struct frag_hdr),
1221 					      MAX_IPV6_HDR_LEN);
1222 			if (err < 0)
1223 				goto out;
1224 
1225 			hp = OPT_HDR(struct frag_hdr, skb, off);
1226 
1227 			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
1228 				fragment = true;
1229 
1230 			nexthdr = hp->nexthdr;
1231 			off += sizeof(struct frag_hdr);
1232 			break;
1233 		}
1234 		default:
1235 			done = true;
1236 			break;
1237 		}
1238 	}
1239 
1240 	err = -EPROTO;
1241 
1242 	if (!done || fragment)
1243 		goto out;
1244 
1245 	switch (nexthdr) {
1246 	case IPPROTO_TCP:
1247 		if (!skb_partial_csum_set(skb, off,
1248 					  offsetof(struct tcphdr, check)))
1249 			goto out;
1250 
1251 		if (recalculate_partial_csum) {
1252 			err = maybe_pull_tail(skb,
1253 					      off + sizeof(struct tcphdr),
1254 					      MAX_IPV6_HDR_LEN);
1255 			if (err < 0)
1256 				goto out;
1257 
1258 			tcp_hdr(skb)->check =
1259 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1260 						 &ipv6_hdr(skb)->daddr,
1261 						 skb->len - off,
1262 						 IPPROTO_TCP, 0);
1263 		}
1264 		break;
1265 	case IPPROTO_UDP:
1266 		if (!skb_partial_csum_set(skb, off,
1267 					  offsetof(struct udphdr, check)))
1268 			goto out;
1269 
1270 		if (recalculate_partial_csum) {
1271 			err = maybe_pull_tail(skb,
1272 					      off + sizeof(struct udphdr),
1273 					      MAX_IPV6_HDR_LEN);
1274 			if (err < 0)
1275 				goto out;
1276 
1277 			udp_hdr(skb)->check =
1278 				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
1279 						 &ipv6_hdr(skb)->daddr,
1280 						 skb->len - off,
1281 						 IPPROTO_UDP, 0);
1282 		}
1283 		break;
1284 	default:
1285 		goto out;
1286 	}
1287 
1288 	err = 0;
1289 
1290 out:
1291 	return err;
1292 }
1293 
1294 static int checksum_setup(struct xenvif *vif, struct sk_buff *skb)
1295 {
1296 	int err = -EPROTO;
1297 	int recalculate_partial_csum = 0;
1298 
1299 	/* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
1300 	 * peers can fail to set NETRXF_csum_blank when sending a GSO
1301 	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
1302 	 * recalculate the partial checksum.
1303 	 */
1304 	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
1305 		vif->rx_gso_checksum_fixup++;
1306 		skb->ip_summed = CHECKSUM_PARTIAL;
1307 		recalculate_partial_csum = 1;
1308 	}
1309 
1310 	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
1311 	if (skb->ip_summed != CHECKSUM_PARTIAL)
1312 		return 0;
1313 
1314 	if (skb->protocol == htons(ETH_P_IP))
1315 		err = checksum_setup_ip(vif, skb, recalculate_partial_csum);
1316 	else if (skb->protocol == htons(ETH_P_IPV6))
1317 		err = checksum_setup_ipv6(vif, skb, recalculate_partial_csum);
1318 
1319 	return err;
1320 }
1321 
1322 static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
1323 {
1324 	u64 now = get_jiffies_64();
1325 	u64 next_credit = vif->credit_window_start +
1326 		msecs_to_jiffies(vif->credit_usec / 1000);
1327 
1328 	/* Timer could already be pending in rare cases. */
1329 	if (timer_pending(&vif->credit_timeout))
1330 		return true;
1331 
1332 	/* Passed the point where we can replenish credit? */
1333 	if (time_after_eq64(now, next_credit)) {
1334 		vif->credit_window_start = now;
1335 		tx_add_credit(vif);
1336 	}
1337 
1338 	/* Still too big to send right now? Set a callback. */
1339 	if (size > vif->remaining_credit) {
1340 		vif->credit_timeout.data     =
1341 			(unsigned long)vif;
1342 		vif->credit_timeout.function =
1343 			tx_credit_callback;
1344 		mod_timer(&vif->credit_timeout,
1345 			  next_credit);
1346 		vif->credit_window_start = next_credit;
1347 
1348 		return true;
1349 	}
1350 
1351 	return false;
1352 }
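/* A worked example: with credit_bytes == 100000 and credit_usec == 100000
 * (i.e. 100000 bytes per 100 ms), a request arriving after the 100 ms window
 * has elapsed replenishes the credit immediately; one arriving earlier and
 * exceeding remaining_credit arms credit_timeout to fire at next_credit, and
 * tx_credit_callback() then replenishes the credit and kicks tx processing
 * again.
 */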
1353 
1354 static unsigned xenvif_tx_build_gops(struct xenvif *vif)
1355 {
1356 	struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
1357 	struct sk_buff *skb;
1358 	int ret;
1359 
1360 	while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
1361 		< MAX_PENDING_REQS)) {
1362 		struct xen_netif_tx_request txreq;
1363 		struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
1364 		struct page *page;
1365 		struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
1366 		u16 pending_idx;
1367 		RING_IDX idx;
1368 		int work_to_do;
1369 		unsigned int data_len;
1370 		pending_ring_idx_t index;
1371 
1372 		if (vif->tx.sring->req_prod - vif->tx.req_cons >
1373 		    XEN_NETIF_TX_RING_SIZE) {
1374 			netdev_err(vif->dev,
1375 				   "Impossible number of requests. "
1376 				   "req_prod %d, req_cons %d, size %ld\n",
1377 				   vif->tx.sring->req_prod, vif->tx.req_cons,
1378 				   XEN_NETIF_TX_RING_SIZE);
1379 			xenvif_fatal_tx_err(vif);
1380 			continue;
1381 		}
1382 
1383 		RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
1384 		if (!work_to_do)
1385 			break;
1386 
1387 		idx = vif->tx.req_cons;
1388 		rmb(); /* Ensure that we see the request before we copy it. */
1389 		memcpy(&txreq, RING_GET_REQUEST(&vif->tx, idx), sizeof(txreq));
1390 
1391 		/* Credit-based scheduling. */
1392 		if (txreq.size > vif->remaining_credit &&
1393 		    tx_credit_exceeded(vif, txreq.size))
1394 			break;
1395 
1396 		vif->remaining_credit -= txreq.size;
1397 
1398 		work_to_do--;
1399 		vif->tx.req_cons = ++idx;
1400 
1401 		memset(extras, 0, sizeof(extras));
1402 		if (txreq.flags & XEN_NETTXF_extra_info) {
1403 			work_to_do = xenvif_get_extras(vif, extras,
1404 						       work_to_do);
1405 			idx = vif->tx.req_cons;
1406 			if (unlikely(work_to_do < 0))
1407 				break;
1408 		}
1409 
1410 		ret = xenvif_count_requests(vif, &txreq, txfrags, work_to_do);
1411 		if (unlikely(ret < 0))
1412 			break;
1413 
1414 		idx += ret;
1415 
1416 		if (unlikely(txreq.size < ETH_HLEN)) {
1417 			netdev_dbg(vif->dev,
1418 				   "Bad packet size: %d\n", txreq.size);
1419 			xenvif_tx_err(vif, &txreq, idx);
1420 			break;
1421 		}
1422 
1423 		/* No crossing a page boundary, as the payload mustn't fragment. */
1424 		if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
1425 			netdev_err(vif->dev,
1426 				   "txreq.offset: %x, size: %u, end: %lu\n",
1427 				   txreq.offset, txreq.size,
1428 				   (txreq.offset&~PAGE_MASK) + txreq.size);
1429 			xenvif_fatal_tx_err(vif);
1430 			break;
1431 		}
1432 
1433 		index = pending_index(vif->pending_cons);
1434 		pending_idx = vif->pending_ring[index];
1435 
1436 		data_len = (txreq.size > PKT_PROT_LEN &&
1437 			    ret < XEN_NETBK_LEGACY_SLOTS_MAX) ?
1438 			PKT_PROT_LEN : txreq.size;
1439 
1440 		skb = alloc_skb(data_len + NET_SKB_PAD + NET_IP_ALIGN,
1441 				GFP_ATOMIC | __GFP_NOWARN);
1442 		if (unlikely(skb == NULL)) {
1443 			netdev_dbg(vif->dev,
1444 				   "Can't allocate a skb in start_xmit.\n");
1445 			xenvif_tx_err(vif, &txreq, idx);
1446 			break;
1447 		}
1448 
1449 		/* Packets passed to netif_rx() must have some headroom. */
1450 		skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
1451 
1452 		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1453 			struct xen_netif_extra_info *gso;
1454 			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1455 
1456 			if (xenvif_set_skb_gso(vif, skb, gso)) {
1457 				/* Failure in xenvif_set_skb_gso is fatal. */
1458 				kfree_skb(skb);
1459 				break;
1460 			}
1461 		}
1462 
1463 		/* XXX could copy straight to head */
1464 		page = xenvif_alloc_page(vif, pending_idx);
1465 		if (!page) {
1466 			kfree_skb(skb);
1467 			xenvif_tx_err(vif, &txreq, idx);
1468 			break;
1469 		}
1470 
1471 		gop->source.u.ref = txreq.gref;
1472 		gop->source.domid = vif->domid;
1473 		gop->source.offset = txreq.offset;
1474 
1475 		gop->dest.u.gmfn = virt_to_mfn(page_address(page));
1476 		gop->dest.domid = DOMID_SELF;
1477 		gop->dest.offset = txreq.offset;
1478 
1479 		gop->len = txreq.size;
1480 		gop->flags = GNTCOPY_source_gref;
1481 
1482 		gop++;
1483 
1484 		memcpy(&vif->pending_tx_info[pending_idx].req,
1485 		       &txreq, sizeof(txreq));
1486 		vif->pending_tx_info[pending_idx].head = index;
1487 		*((u16 *)skb->data) = pending_idx;
1488 
1489 		__skb_put(skb, data_len);
1490 
1491 		skb_shinfo(skb)->nr_frags = ret;
1492 		if (data_len < txreq.size) {
1493 			skb_shinfo(skb)->nr_frags++;
1494 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1495 					     pending_idx);
1496 		} else {
1497 			frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1498 					     INVALID_PENDING_IDX);
1499 		}
1500 
1501 		vif->pending_cons++;
1502 
1503 		request_gop = xenvif_get_requests(vif, skb, txfrags, gop);
1504 		if (request_gop == NULL) {
1505 			kfree_skb(skb);
1506 			xenvif_tx_err(vif, &txreq, idx);
1507 			break;
1508 		}
1509 		gop = request_gop;
1510 
1511 		__skb_queue_tail(&vif->tx_queue, skb);
1512 
1513 		vif->tx.req_cons = idx;
1514 
1515 		if ((gop-vif->tx_copy_ops) >= ARRAY_SIZE(vif->tx_copy_ops))
1516 			break;
1517 	}
1518 
1519 	return gop - vif->tx_copy_ops;
1520 }
1521 
1522 
1523 static int xenvif_tx_submit(struct xenvif *vif, int budget)
1524 {
1525 	struct gnttab_copy *gop = vif->tx_copy_ops;
1526 	struct sk_buff *skb;
1527 	int work_done = 0;
1528 
1529 	while (work_done < budget &&
1530 	       (skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
1531 		struct xen_netif_tx_request *txp;
1532 		u16 pending_idx;
1533 		unsigned data_len;
1534 
1535 		pending_idx = *((u16 *)skb->data);
1536 		txp = &vif->pending_tx_info[pending_idx].req;
1537 
1538 		/* Check the remap error code. */
1539 		if (unlikely(xenvif_tx_check_gop(vif, skb, &gop))) {
1540 			netdev_dbg(vif->dev, "netback grant failed.\n");
1541 			skb_shinfo(skb)->nr_frags = 0;
1542 			kfree_skb(skb);
1543 			continue;
1544 		}
1545 
1546 		data_len = skb->len;
1547 		memcpy(skb->data,
1548 		       (void *)(idx_to_kaddr(vif, pending_idx)|txp->offset),
1549 		       data_len);
1550 		if (data_len < txp->size) {
1551 			/* Append the packet payload as a fragment. */
1552 			txp->offset += data_len;
1553 			txp->size -= data_len;
1554 		} else {
1555 			/* Schedule a response immediately. */
1556 			xenvif_idx_release(vif, pending_idx,
1557 					   XEN_NETIF_RSP_OKAY);
1558 		}
1559 
1560 		if (txp->flags & XEN_NETTXF_csum_blank)
1561 			skb->ip_summed = CHECKSUM_PARTIAL;
1562 		else if (txp->flags & XEN_NETTXF_data_validated)
1563 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1564 
1565 		xenvif_fill_frags(vif, skb);
1566 
1567 		if (skb_is_nonlinear(skb) && skb_headlen(skb) < PKT_PROT_LEN) {
1568 			int target = min_t(int, skb->len, PKT_PROT_LEN);
1569 			__pskb_pull_tail(skb, target - skb_headlen(skb));
1570 		}
1571 
1572 		skb->dev      = vif->dev;
1573 		skb->protocol = eth_type_trans(skb, skb->dev);
1574 		skb_reset_network_header(skb);
1575 
1576 		if (checksum_setup(vif, skb)) {
1577 			netdev_dbg(vif->dev,
1578 				   "Can't setup checksum in net_tx_action\n");
1579 			kfree_skb(skb);
1580 			continue;
1581 		}
1582 
1583 		skb_probe_transport_header(skb, 0);
1584 
1585 		vif->dev->stats.rx_bytes += skb->len;
1586 		vif->dev->stats.rx_packets++;
1587 
1588 		work_done++;
1589 
1590 		netif_receive_skb(skb);
1591 	}
1592 
1593 	return work_done;
1594 }
1595 
1596 /* Called after netfront has transmitted */
1597 int xenvif_tx_action(struct xenvif *vif, int budget)
1598 {
1599 	unsigned nr_gops;
1600 	int work_done;
1601 
1602 	if (unlikely(!tx_work_todo(vif)))
1603 		return 0;
1604 
1605 	nr_gops = xenvif_tx_build_gops(vif);
1606 
1607 	if (nr_gops == 0)
1608 		return 0;
1609 
1610 	gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
1611 
1612 	work_done = xenvif_tx_submit(vif, nr_gops);
1613 
1614 	return work_done;
1615 }
1616 
1617 static void xenvif_idx_release(struct xenvif *vif, u16 pending_idx,
1618 			       u8 status)
1619 {
1620 	struct pending_tx_info *pending_tx_info;
1621 	pending_ring_idx_t head;
1622 	u16 peek; /* peek into next tx request */
1623 
1624 	BUG_ON(vif->mmap_pages[pending_idx] == (void *)(~0UL));
1625 
1626 	/* Already complete? */
1627 	if (vif->mmap_pages[pending_idx] == NULL)
1628 		return;
1629 
1630 	pending_tx_info = &vif->pending_tx_info[pending_idx];
1631 
1632 	head = pending_tx_info->head;
1633 
1634 	BUG_ON(!pending_tx_is_head(vif, head));
1635 	BUG_ON(vif->pending_ring[pending_index(head)] != pending_idx);
1636 
1637 	do {
1638 		pending_ring_idx_t index;
1639 		pending_ring_idx_t idx = pending_index(head);
1640 		u16 info_idx = vif->pending_ring[idx];
1641 
1642 		pending_tx_info = &vif->pending_tx_info[info_idx];
1643 		make_tx_response(vif, &pending_tx_info->req, status);
1644 
1645 		/* Setting any number other than
1646 		 * INVALID_PENDING_RING_IDX indicates this slot is
1647 		 * starting a new packet / ending a previous packet.
1648 		 */
1649 		pending_tx_info->head = 0;
1650 
1651 		index = pending_index(vif->pending_prod++);
1652 		vif->pending_ring[index] = vif->pending_ring[info_idx];
1653 
1654 		peek = vif->pending_ring[pending_index(++head)];
1655 
1656 	} while (!pending_tx_is_head(vif, peek));
1657 
1658 	put_page(vif->mmap_pages[pending_idx]);
1659 	vif->mmap_pages[pending_idx] = NULL;
1660 }
1661 
1662 
1663 static void make_tx_response(struct xenvif *vif,
1664 			     struct xen_netif_tx_request *txp,
1665 			     s8       st)
1666 {
1667 	RING_IDX i = vif->tx.rsp_prod_pvt;
1668 	struct xen_netif_tx_response *resp;
1669 	int notify;
1670 
1671 	resp = RING_GET_RESPONSE(&vif->tx, i);
1672 	resp->id     = txp->id;
1673 	resp->status = st;
1674 
1675 	if (txp->flags & XEN_NETTXF_extra_info)
1676 		RING_GET_RESPONSE(&vif->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1677 
1678 	vif->tx.rsp_prod_pvt = ++i;
1679 	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->tx, notify);
1680 	if (notify)
1681 		notify_remote_via_irq(vif->tx_irq);
1682 }
1683 
1684 static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
1685 					     u16      id,
1686 					     s8       st,
1687 					     u16      offset,
1688 					     u16      size,
1689 					     u16      flags)
1690 {
1691 	RING_IDX i = vif->rx.rsp_prod_pvt;
1692 	struct xen_netif_rx_response *resp;
1693 
1694 	resp = RING_GET_RESPONSE(&vif->rx, i);
1695 	resp->offset     = offset;
1696 	resp->flags      = flags;
1697 	resp->id         = id;
1698 	resp->status     = (s16)size;
1699 	if (st < 0)
1700 		resp->status = (s16)st;
1701 
1702 	vif->rx.rsp_prod_pvt = ++i;
1703 
1704 	return resp;
1705 }
1706 
1707 static inline int rx_work_todo(struct xenvif *vif)
1708 {
1709 	return !skb_queue_empty(&vif->rx_queue) || vif->rx_event;
1710 }
1711 
1712 static inline int tx_work_todo(struct xenvif *vif)
1713 {
1714 
1715 	if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->tx)) &&
1716 	    (nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
1717 	     < MAX_PENDING_REQS))
1718 		return 1;
1719 
1720 	return 0;
1721 }
1722 
1723 void xenvif_unmap_frontend_rings(struct xenvif *vif)
1724 {
1725 	if (vif->tx.sring)
1726 		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1727 					vif->tx.sring);
1728 	if (vif->rx.sring)
1729 		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
1730 					vif->rx.sring);
1731 }
1732 
1733 int xenvif_map_frontend_rings(struct xenvif *vif,
1734 			      grant_ref_t tx_ring_ref,
1735 			      grant_ref_t rx_ring_ref)
1736 {
1737 	void *addr;
1738 	struct xen_netif_tx_sring *txs;
1739 	struct xen_netif_rx_sring *rxs;
1740 
1741 	int err = -ENOMEM;
1742 
1743 	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1744 				     tx_ring_ref, &addr);
1745 	if (err)
1746 		goto err;
1747 
1748 	txs = (struct xen_netif_tx_sring *)addr;
1749 	BACK_RING_INIT(&vif->tx, txs, PAGE_SIZE);
1750 
1751 	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
1752 				     rx_ring_ref, &addr);
1753 	if (err)
1754 		goto err;
1755 
1756 	rxs = (struct xen_netif_rx_sring *)addr;
1757 	BACK_RING_INIT(&vif->rx, rxs, PAGE_SIZE);
1758 
1759 	return 0;
1760 
1761 err:
1762 	xenvif_unmap_frontend_rings(vif);
1763 	return err;
1764 }
1765 
1766 void xenvif_stop_queue(struct xenvif *vif)
1767 {
1768 	if (!vif->can_queue)
1769 		return;
1770 
1771 	netif_stop_queue(vif->dev);
1772 }
1773 
1774 static void xenvif_start_queue(struct xenvif *vif)
1775 {
1776 	if (xenvif_schedulable(vif))
1777 		netif_wake_queue(vif->dev);
1778 }
1779 
1780 int xenvif_kthread(void *data)
1781 {
1782 	struct xenvif *vif = data;
1783 	struct sk_buff *skb;
1784 
1785 	while (!kthread_should_stop()) {
1786 		wait_event_interruptible(vif->wq,
1787 					 rx_work_todo(vif) ||
1788 					 kthread_should_stop());
1789 		if (kthread_should_stop())
1790 			break;
1791 
1792 		if (!skb_queue_empty(&vif->rx_queue))
1793 			xenvif_rx_action(vif);
1794 
1795 		vif->rx_event = false;
1796 
1797 		if (skb_queue_empty(&vif->rx_queue) &&
1798 		    netif_queue_stopped(vif->dev))
1799 			xenvif_start_queue(vif);
1800 
1801 		cond_resched();
1802 	}
1803 
1804 	/* Bin any remaining skbs */
1805 	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL)
1806 		dev_kfree_skb(skb);
1807 
1808 	return 0;
1809 }
1810 
1811 static int __init netback_init(void)
1812 {
1813 	int rc = 0;
1814 
1815 	if (!xen_domain())
1816 		return -ENODEV;
1817 
1818 	if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
1819 		pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
1820 			fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
1821 		fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
1822 	}
1823 
1824 	rc = xenvif_xenbus_init();
1825 	if (rc)
1826 		goto failed_init;
1827 
1828 	return 0;
1829 
1830 failed_init:
1831 	return rc;
1832 }
1833 
1834 module_init(netback_init);
1835 
1836 static void __exit netback_fini(void)
1837 {
1838 	xenvif_xenbus_fini();
1839 }
1840 module_exit(netback_fini);
1841 
1842 MODULE_LICENSE("Dual BSD/GPL");
1843 MODULE_ALIAS("xen-backend:vif");
1844