/*
 * Copyright (c) 2016 Citrix Systems Inc.
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>

#include <xen/xen.h>
#include <xen/events.h>

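/* Check whether the shared ring has enough free request slots for the
 * skb at the head of the rx queue: one slot per XEN_PAGE_SIZE of data,
 * plus one for a GSO extra segment and one for a hash extra segment.
 * If not, ask the frontend for an event when more requests are posted
 * and re-check, in case new requests raced with that write.
 */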
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;
	struct sk_buff *skb;
	int needed;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return false;

	needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
	if (skb_is_gso(skb))
		needed++;
	if (skb->sw_hash)
		needed++;

	do {
		prod = queue->rx.sring->req_prod;
		cons = queue->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		queue->rx.sring->req_event = prod + 1;

		/* Make sure event is visible before we check prod
		 * again.
		 */
		mb();
	} while (queue->rx.sring->req_prod != prod);

	return false;
}

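/* Queue an skb for transmission to the guest. If queueing it takes the
 * backlog past rx_queue_max, stop the corresponding netdev tx queue to
 * apply backpressure.
 */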
void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->rx_queue.lock, flags);

	__skb_queue_tail(&queue->rx_queue, skb);

	queue->rx_queue_len += skb->len;
	if (queue->rx_queue_len > queue->rx_queue_max) {
		struct net_device *dev = queue->vif->dev;

		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
	}

	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}

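/* Remove and return the skb at the head of the rx queue, waking the
 * netdev tx queue once the backlog drops below rx_queue_max again.
 */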
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	spin_lock_irq(&queue->rx_queue.lock);

	skb = __skb_dequeue(&queue->rx_queue);
	if (skb) {
		queue->rx_queue_len -= skb->len;
		if (queue->rx_queue_len < queue->rx_queue_max) {
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
			netif_tx_wake_queue(txq);
		}
	}

	spin_unlock_irq(&queue->rx_queue.lock);

	return skb;
}

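/* Drop every skb still on the rx queue, e.g. when the queue's kthread
 * is being stopped.
 */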
static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
		kfree_skb(skb);
}

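/* Drop skbs from the head of the rx queue that have been waiting
 * longer than their expiry time.
 */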
static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	for (;;) {
		skb = skb_peek(&queue->rx_queue);
		if (!skb)
			break;
		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
			break;
		xenvif_rx_dequeue(queue);
		kfree_skb(skb);
	}
}

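/* Issue the batched grant copy operations, propagate any per-op
 * failure into the status of the matching response, push the completed
 * responses to the frontend (notifying it if necessary) and free the
 * completed skbs.
 */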
static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
{
	unsigned int i;
	int notify;

	gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);

	for (i = 0; i < queue->rx_copy.num; i++) {
		struct gnttab_copy *op;

		op = &queue->rx_copy.op[i];

		/* If the copy failed, overwrite the status field in
		 * the corresponding response.
		 */
		if (unlikely(op->status != GNTST_okay)) {
			struct xen_netif_rx_response *rsp;

			rsp = RING_GET_RESPONSE(&queue->rx,
						queue->rx_copy.idx[i]);
			rsp->status = op->status;
		}
	}

	queue->rx_copy.num = 0;

	/* Push responses for all completed packets. */
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);

	__skb_queue_purge(queue->rx_copy.completed);
}

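/* Add one grant copy operation to the batch, copying len bytes of data
 * into the guest page granted by req at the given offset, flushing the
 * batch first if it is already full. Source pages may themselves be
 * foreign (grant-mapped from another domain), in which case the source
 * is specified by grant reference rather than by frame number.
 */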
static void xenvif_rx_copy_add(struct xenvif_queue *queue,
			       struct xen_netif_rx_request *req,
			       unsigned int offset, void *data, size_t len)
{
	struct gnttab_copy *op;
	struct page *page;
	struct xen_page_foreign *foreign;

	if (queue->rx_copy.num == COPY_BATCH_SIZE)
		xenvif_rx_copy_flush(queue);

	op = &queue->rx_copy.op[queue->rx_copy.num];

	page = virt_to_page(data);

	op->flags = GNTCOPY_dest_gref;

	foreign = xen_page_foreign(page);
	if (foreign) {
		op->source.domid = foreign->domid;
		op->source.u.ref = foreign->gref;
		op->flags |= GNTCOPY_source_gref;
	} else {
		op->source.u.gmfn = virt_to_gfn(data);
		op->source.domid  = DOMID_SELF;
	}

	op->source.offset = xen_offset_in_page(data);
	op->dest.u.ref    = req->gref;
	op->dest.domid    = queue->vif->domid;
	op->dest.offset   = offset;
	op->len           = len;

	queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
	queue->rx_copy.num++;
}

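/* Map the skb's GSO type onto the GSO type advertised to the frontend;
 * anything that is GSO but not TCPv4 is reported as TCPv6.
 */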
static unsigned int xenvif_gso_type(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			return XEN_NETIF_GSO_TYPE_TCPV4;
		else
			return XEN_NETIF_GSO_TYPE_TCPV6;
	}
	return XEN_NETIF_GSO_TYPE_NONE;
}

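/* State carried across the ring slots of a single packet while it is
 * being turned into responses and grant copies.
 */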
struct xenvif_pkt_state {
	struct sk_buff *skb;
	size_t remaining_len;
	struct sk_buff *frag_iter;
	int frag; /* frag == -1 => frag_iter->head */
	unsigned int frag_offset;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
	unsigned int extra_count;
	unsigned int slot;
};

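/* Dequeue the next skb and initialize the packet state for it,
 * preparing GSO and hash extra-info segments as required. Note that
 * the guest Rx path is the backend's transmit path, hence the tx_*
 * stats.
 */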
static void xenvif_rx_next_skb(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{
	struct sk_buff *skb;
	unsigned int gso_type;

	skb = xenvif_rx_dequeue(queue);

	queue->stats.tx_bytes += skb->len;
	queue->stats.tx_packets++;

	/* Reset packet state. */
	memset(pkt, 0, sizeof(struct xenvif_pkt_state));

	pkt->skb = skb;
	pkt->frag_iter = skb;
	pkt->remaining_len = skb->len;
	pkt->frag = -1;

	gso_type = xenvif_gso_type(skb);
	if ((1 << gso_type) & queue->vif->gso_mask) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

		extra->u.gso.type = gso_type;
		extra->u.gso.size = skb_shinfo(skb)->gso_size;
		extra->u.gso.pad = 0;
		extra->u.gso.features = 0;
		extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
		extra->flags = 0;

		pkt->extra_count++;
	}

	if (skb->sw_hash) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

		extra->u.hash.algorithm =
			XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;

		if (skb->l4_hash)
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
		else
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6;

		*(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);

		extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
		extra->flags = 0;

		pkt->extra_count++;
	}
}

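/* Finish a packet: publish the responses written so far and hand the
 * skb over to be freed once the copy batch has been flushed.
 */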
static void xenvif_rx_complete(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{
	/* All responses are ready to be pushed. */
	queue->rx.rsp_prod_pvt = queue->rx.req_cons;

	__skb_queue_tail(queue->rx_copy.completed, pkt->skb);
}

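/* Advance to the next fragment of the packet, following frag_list
 * chains; frag == -1 selects the linear (head) area of the current
 * frag_iter.
 */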
static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
{
	struct sk_buff *frag_iter = pkt->frag_iter;
	unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags;

	pkt->frag++;
	pkt->frag_offset = 0;

	if (pkt->frag >= nr_frags) {
		if (frag_iter == pkt->skb)
			pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
		else
			pkt->frag_iter = frag_iter->next;

		pkt->frag = -1;
	}
}

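/* Return the next contiguous chunk of packet data. A chunk never
 * crosses a Xen page boundary, either in the source fragment or in
 * the destination slot (which is already filled up to offset).
 */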
static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 unsigned int offset, void **data,
				 size_t *len)
{
	struct sk_buff *frag_iter = pkt->frag_iter;
	void *frag_data;
	size_t frag_len, chunk_len;

	BUG_ON(!frag_iter);

	if (pkt->frag == -1) {
		frag_data = frag_iter->data;
		frag_len = skb_headlen(frag_iter);
	} else {
		skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];

		frag_data = skb_frag_address(frag);
		frag_len = skb_frag_size(frag);
	}

	frag_data += pkt->frag_offset;
	frag_len -= pkt->frag_offset;

	chunk_len = min(frag_len, XEN_PAGE_SIZE - offset);
	chunk_len = min(chunk_len,
			XEN_PAGE_SIZE - xen_offset_in_page(frag_data));

	pkt->frag_offset += chunk_len;

	/* Advance to next frag? */
	if (frag_len == chunk_len)
		xenvif_rx_next_frag(pkt);

	*data = frag_data;
	*len = chunk_len;
}

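/* Fill one ring slot with packet data, issuing as many grant copies
 * as are needed to fill the slot's page or exhaust the packet, and
 * write the matching response. Checksum, more_data and extra_info
 * flags are only set on the first slot of a packet.
 */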
static void xenvif_rx_data_slot(struct xenvif_queue *queue,
				struct xenvif_pkt_state *pkt,
				struct xen_netif_rx_request *req,
				struct xen_netif_rx_response *rsp)
{
	unsigned int offset = 0;
	unsigned int flags;

	do {
		size_t len;
		void *data;

		xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
		xenvif_rx_copy_add(queue, req, offset, data, len);

		offset += len;
		pkt->remaining_len -= len;

	} while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);

	if (pkt->remaining_len > 0)
		flags = XEN_NETRXF_more_data;
	else
		flags = 0;

	if (pkt->slot == 0) {
		struct sk_buff *skb = pkt->skb;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			flags |= XEN_NETRXF_csum_blank |
				 XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			flags |= XEN_NETRXF_data_validated;

		if (pkt->extra_count != 0)
			flags |= XEN_NETRXF_extra_info;
	}

	rsp->offset = 0;
	rsp->flags = flags;
	rsp->id = req->id;
	rsp->status = (s16)offset;
}

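/* Consume one ring slot with the next pending extra-info segment; the
 * response slot is overlaid with a xen_netif_extra_info structure.
 */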
static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 struct xen_netif_rx_request *req,
				 struct xen_netif_rx_response *rsp)
{
	struct xen_netif_extra_info *extra = (void *)rsp;
	unsigned int i;

	pkt->extra_count--;

	for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
		if (pkt->extras[i].type) {
			*extra = pkt->extras[i];

			if (pkt->extra_count != 0)
				extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;

			pkt->extras[i].type = 0;
			return;
		}
	}
	BUG();
}

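/* Transmit one skb to the guest, consuming request slots and writing
 * responses until all of the packet's data and extra-info segments
 * have been emitted.
 */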
void xenvif_rx_skb(struct xenvif_queue *queue)
{
	struct xenvif_pkt_state pkt;

	xenvif_rx_next_skb(queue, &pkt);

	queue->last_rx_time = jiffies;

	do {
		struct xen_netif_rx_request *req;
		struct xen_netif_rx_response *rsp;

		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
		rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);

		/* Extras must go after the first data slot */
		if (pkt.slot != 0 && pkt.extra_count != 0)
			xenvif_rx_extra_slot(queue, &pkt, req, rsp);
		else
			xenvif_rx_data_slot(queue, &pkt, req, rsp);

		queue->rx.req_cons++;
		pkt.slot++;
	} while (pkt.remaining_len > 0 || pkt.extra_count != 0);

	xenvif_rx_complete(queue, &pkt);
}

#define RX_BATCH_SIZE 64

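/* Process up to RX_BATCH_SIZE packets, as long as ring slots are
 * available, then flush the grant copy batch and complete the skbs.
 */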
void xenvif_rx_action(struct xenvif_queue *queue)
{
	struct sk_buff_head completed_skbs;
	unsigned int work_done = 0;

	__skb_queue_head_init(&completed_skbs);
	queue->rx_copy.completed = &completed_skbs;

	while (xenvif_rx_ring_slots_available(queue) &&
	       work_done < RX_BATCH_SIZE) {
		xenvif_rx_skb(queue);
		work_done++;
	}

	/* Flush any pending copies and complete all skbs. */
	xenvif_rx_copy_flush(queue);
}

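/* A queue is considered stalled if the frontend has no unconsumed
 * request slots posted and the queue has not passed any packet to the
 * guest for longer than the stall timeout.
 */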
static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return !queue->stalled &&
		prod - cons < 1 &&
		time_after(jiffies,
			   queue->last_rx_time + queue->vif->stall_timeout);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return queue->stalled && prod - cons >= 1;
}

static bool xenvif_have_rx_work(struct xenvif_queue *queue)
{
	return xenvif_rx_ring_slots_available(queue) ||
		(queue->vif->stall_timeout &&
		 (xenvif_rx_queue_stalled(queue) ||
		  xenvif_rx_queue_ready(queue))) ||
		kthread_should_stop() ||
		queue->vif->disabled;
}

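/* Timeout until the skb at the head of the rx queue expires, or an
 * infinite timeout if the queue is empty.
 */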
static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
	struct sk_buff *skb;
	long timeout;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
	return timeout < 0 ? 0 : timeout;
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning).  In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when an skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
	DEFINE_WAIT(wait);

	if (xenvif_have_rx_work(queue))
		return;

	for (;;) {
		long ret;

		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
		if (xenvif_have_rx_work(queue))
			break;
		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
		if (!ret)
			break;
	}
	finish_wait(&queue->wq, &wait);
}

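/* Mark this queue as stalled; the carrier is dropped when the first
 * of the vif's queues stalls, and re-enabled only once all queues are
 * ready again.
 */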
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->stalled = true;

	/* At least one queue has stalled? Disable the carrier. */
	spin_lock(&vif->lock);
	if (vif->stalled_queues++ == 0) {
		netdev_info(vif->dev, "Guest Rx stalled");
		netif_carrier_off(vif->dev);
	}
	spin_unlock(&vif->lock);
}

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
	queue->stalled = false;

	/* All queues are ready? Enable the carrier. */
	spin_lock(&vif->lock);
	if (--vif->stalled_queues == 0) {
		netdev_info(vif->dev, "Guest Rx ready");
		netif_carrier_on(vif->dev);
	}
	spin_unlock(&vif->lock);
}

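/* Per-queue kernel thread for the guest Rx path: waits for work,
 * passes queued packets to the guest, handles stall detection and
 * drops expired packets that may be holding foreign pages.
 */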
int xenvif_kthread_guest_rx(void *data)
{
	struct xenvif_queue *queue = data;
	struct xenvif *vif = queue->vif;

	if (!vif->stall_timeout)
		xenvif_queue_carrier_on(queue);

	for (;;) {
		xenvif_wait_for_rx_work(queue);

		if (kthread_should_stop())
			break;

		/* This frontend has been found to be rogue; disable
		 * it in kthread context. Currently this is only set
		 * when netback finds that the frontend has sent a
		 * malformed packet, but we cannot disable the
		 * interface in softirq context, so we defer it here,
		 * if this thread is associated with queue 0.
		 */
		if (unlikely(vif->disabled && queue->id == 0)) {
			xenvif_carrier_off(vif);
			break;
		}

		if (!skb_queue_empty(&queue->rx_queue))
			xenvif_rx_action(queue);

		/* If the guest hasn't provided any Rx slots for a
		 * while, it's probably not responsive; drop the
		 * carrier so packets are dropped earlier.
		 */
		if (vif->stall_timeout) {
			if (xenvif_rx_queue_stalled(queue))
				xenvif_queue_carrier_off(queue);
			else if (xenvif_rx_queue_ready(queue))
				xenvif_queue_carrier_on(queue);
		}

		/* Queued packets may have foreign pages from other
		 * domains.  These cannot be queued indefinitely as
		 * this would starve guests of grant refs and transmit
		 * slots.
		 */
		xenvif_rx_queue_drop_expired(queue);

		cond_resched();
	}

	/* Bin any remaining skbs */
	xenvif_rx_queue_purge(queue);

	return 0;
}