/*
 * Copyright (c) 2016 Citrix Systems Inc.
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>

#include <xen/xen.h>
#include <xen/events.h>

/*
 * Update the number of ring page slots needed for the first SKB queued.
 * Note that any caller outside the RX thread must wake the RX thread up
 * afterwards via xenvif_kick_thread() in order to avoid racing with the
 * thread being put to sleep.
 */
static void xenvif_update_needed_slots(struct xenvif_queue *queue,
				       const struct sk_buff *skb)
{
	unsigned int needed = 0;

	if (skb) {
		needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
		if (skb_is_gso(skb))
			needed++;
		if (skb->sw_hash)
			needed++;
	}

	WRITE_ONCE(queue->rx_slots_needed, needed);
}

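/*
 * Check whether the frontend has posted enough rx ring requests for the
 * packet at the head of the rx queue. If not, re-arm req_event so the
 * frontend notifies us when more requests are posted, and re-check once
 * to close the race with a concurrent update of req_prod.
 */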
static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;
	unsigned int needed;

	needed = READ_ONCE(queue->rx_slots_needed);
	if (!needed)
		return false;

	do {
		prod = queue->rx.sring->req_prod;
		cons = queue->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		queue->rx.sring->req_event = prod + 1;

		/* Make sure event is visible before we check prod
		 * again.
		 */
		mb();
	} while (queue->rx.sring->req_prod != prod);

	return false;
}

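/*
 * Queue an skb for transmission to the frontend. Returns false without
 * queuing the skb, and stops the corresponding netdev tx queue, if the
 * rx queue is already at its maximum length.
 */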
bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned long flags;
	bool ret = true;

	spin_lock_irqsave(&queue->rx_queue.lock, flags);

	if (queue->rx_queue_len >= queue->rx_queue_max) {
		struct net_device *dev = queue->vif->dev;

		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
		ret = false;
	} else {
		if (skb_queue_empty(&queue->rx_queue))
			xenvif_update_needed_slots(queue, skb);

		__skb_queue_tail(&queue->rx_queue, skb);

		queue->rx_queue_len += skb->len;
	}

	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);

	return ret;
}

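/*
 * Remove the skb at the head of the rx queue, recompute the slot count
 * needed for the new head, and wake the netdev tx queue again if the
 * queue has dropped below its maximum length.
 */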
static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	spin_lock_irq(&queue->rx_queue.lock);

	skb = __skb_dequeue(&queue->rx_queue);
	if (skb) {
		xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));

		queue->rx_queue_len -= skb->len;
		if (queue->rx_queue_len < queue->rx_queue_max) {
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
			netif_tx_wake_queue(txq);
		}
	}

	spin_unlock_irq(&queue->rx_queue.lock);

	return skb;
}

static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
		kfree_skb(skb);
}

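/*
 * Drop queued skbs whose expiry time has passed, counting each one as
 * an rx_dropped packet on the interface.
 */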
static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	for (;;) {
		skb = skb_peek(&queue->rx_queue);
		if (!skb)
			break;
		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
			break;
		xenvif_rx_dequeue(queue);
		kfree_skb(skb);
		queue->vif->dev->stats.rx_dropped++;
	}
}

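/*
 * Issue the batched grant copy operations, propagate any copy errors
 * into the matching ring responses, push the responses to the frontend
 * (notifying it if required) and free the completed skbs.
 */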
static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
{
	unsigned int i;
	int notify;

	gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);

	for (i = 0; i < queue->rx_copy.num; i++) {
		struct gnttab_copy *op;

		op = &queue->rx_copy.op[i];

		/* If the copy failed, overwrite the status field in
		 * the corresponding response.
		 */
		if (unlikely(op->status != GNTST_okay)) {
			struct xen_netif_rx_response *rsp;

			rsp = RING_GET_RESPONSE(&queue->rx,
						queue->rx_copy.idx[i]);
			rsp->status = op->status;
		}
	}

	queue->rx_copy.num = 0;

	/* Push responses for all completed packets. */
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);

	__skb_queue_purge(queue->rx_copy.completed);
}

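/*
 * Add one grant copy operation for a chunk of packet data destined for
 * the given rx request, flushing the batch first if it is already full.
 * Data living in a page belonging to another domain is copied
 * grant-to-grant; otherwise it is copied from our own frame.
 */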
static void xenvif_rx_copy_add(struct xenvif_queue *queue,
			       struct xen_netif_rx_request *req,
			       unsigned int offset, void *data, size_t len)
{
	struct gnttab_copy *op;
	struct page *page;
	struct xen_page_foreign *foreign;

	if (queue->rx_copy.num == COPY_BATCH_SIZE)
		xenvif_rx_copy_flush(queue);

	op = &queue->rx_copy.op[queue->rx_copy.num];

	page = virt_to_page(data);

	op->flags = GNTCOPY_dest_gref;

	foreign = xen_page_foreign(page);
	if (foreign) {
		op->source.domid = foreign->domid;
		op->source.u.ref = foreign->gref;
		op->flags |= GNTCOPY_source_gref;
	} else {
		op->source.u.gmfn = virt_to_gfn(data);
		op->source.domid = DOMID_SELF;
	}

	op->source.offset = xen_offset_in_page(data);
	op->dest.u.ref = req->gref;
	op->dest.domid = queue->vif->domid;
	op->dest.offset = offset;
	op->len = len;

	queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
	queue->rx_copy.num++;
}

static unsigned int xenvif_gso_type(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			return XEN_NETIF_GSO_TYPE_TCPV4;
		else
			return XEN_NETIF_GSO_TYPE_TCPV6;
	}
	return XEN_NETIF_GSO_TYPE_NONE;
}

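/*
 * State tracked while a single packet is transcribed into the rx ring:
 * the fragment (or frag_list skb) currently being consumed, how much of
 * the packet remains, any extra info segments still to be sent, and the
 * number of ring slots used so far.
 */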
struct xenvif_pkt_state {
	struct sk_buff *skb;
	size_t remaining_len;
	struct sk_buff *frag_iter;
	int frag; /* frag == -1 => frag_iter->head */
	unsigned int frag_offset;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
	unsigned int extra_count;
	unsigned int slot;
};

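/*
 * Dequeue the next skb, account it in the interface stats and prime the
 * packet state, including any GSO, XDP headroom and hash extra info
 * segments the frontend needs to be told about.
 */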
static void xenvif_rx_next_skb(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{
	struct sk_buff *skb;
	unsigned int gso_type;

	skb = xenvif_rx_dequeue(queue);

	queue->stats.tx_bytes += skb->len;
	queue->stats.tx_packets++;

	/* Reset packet state. */
	memset(pkt, 0, sizeof(struct xenvif_pkt_state));

	pkt->skb = skb;
	pkt->frag_iter = skb;
	pkt->remaining_len = skb->len;
	pkt->frag = -1;

	gso_type = xenvif_gso_type(skb);
	if ((1 << gso_type) & queue->vif->gso_mask) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

		extra->u.gso.type = gso_type;
		extra->u.gso.size = skb_shinfo(skb)->gso_size;
		extra->u.gso.pad = 0;
		extra->u.gso.features = 0;
		extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
		extra->flags = 0;

		pkt->extra_count++;
	}

	if (queue->vif->xdp_headroom) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];

		memset(extra, 0, sizeof(struct xen_netif_extra_info));
		extra->u.xdp.headroom = queue->vif->xdp_headroom;
		extra->type = XEN_NETIF_EXTRA_TYPE_XDP;
		extra->flags = 0;

		pkt->extra_count++;
	}

	if (skb->sw_hash) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

		extra->u.hash.algorithm =
			XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;

		if (skb->l4_hash)
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
		else
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6;

		*(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);

		extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
		extra->flags = 0;

		pkt->extra_count++;
	}
}

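/*
 * All slots consumed for this packet now have responses filled in, so
 * advance rsp_prod_pvt to match req_cons and park the skb until the
 * grant copies have been flushed.
 */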
static void xenvif_rx_complete(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{
	/* All responses are ready to be pushed. */
	queue->rx.rsp_prod_pvt = queue->rx.req_cons;

	__skb_queue_tail(queue->rx_copy.completed, pkt->skb);
}

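/*
 * Step to the next fragment of the packet, moving on to the frag_list
 * (or the next skb on it) once the current skb's frags are exhausted.
 */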
static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
{
	struct sk_buff *frag_iter = pkt->frag_iter;
	unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags;

	pkt->frag++;
	pkt->frag_offset = 0;

	if (pkt->frag >= nr_frags) {
		if (frag_iter == pkt->skb)
			pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
		else
			pkt->frag_iter = frag_iter->next;

		pkt->frag = -1;
	}
}

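/*
 * Return the next contiguous chunk of packet data, clamped so that it
 * neither crosses a source page boundary nor overflows the remaining
 * space in the destination slot.
 */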
static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 unsigned int offset, void **data,
				 size_t *len)
{
	struct sk_buff *frag_iter = pkt->frag_iter;
	void *frag_data;
	size_t frag_len, chunk_len;

	BUG_ON(!frag_iter);

	if (pkt->frag == -1) {
		frag_data = frag_iter->data;
		frag_len = skb_headlen(frag_iter);
	} else {
		skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];

		frag_data = skb_frag_address(frag);
		frag_len = skb_frag_size(frag);
	}

	frag_data += pkt->frag_offset;
	frag_len -= pkt->frag_offset;

	chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);
	chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -
					     xen_offset_in_page(frag_data));

	pkt->frag_offset += chunk_len;

	/* Advance to next frag? */
	if (frag_len == chunk_len)
		xenvif_rx_next_frag(pkt);

	*data = frag_data;
	*len = chunk_len;
}

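/*
 * Fill one rx ring slot with packet data, adding grant copy operations
 * until either the slot is full or the packet is exhausted, and build
 * the corresponding response. The first slot also carries the checksum
 * and extra-info flags for the whole packet.
 */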
static void xenvif_rx_data_slot(struct xenvif_queue *queue,
				struct xenvif_pkt_state *pkt,
				struct xen_netif_rx_request *req,
				struct xen_netif_rx_response *rsp)
{
	unsigned int offset = queue->vif->xdp_headroom;
	unsigned int flags;

	do {
		size_t len;
		void *data;

		xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
		xenvif_rx_copy_add(queue, req, offset, data, len);

		offset += len;
		pkt->remaining_len -= len;

	} while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);

	if (pkt->remaining_len > 0)
		flags = XEN_NETRXF_more_data;
	else
		flags = 0;

	if (pkt->slot == 0) {
		struct sk_buff *skb = pkt->skb;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			flags |= XEN_NETRXF_csum_blank |
				 XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			flags |= XEN_NETRXF_data_validated;

		if (pkt->extra_count != 0)
			flags |= XEN_NETRXF_extra_info;
	}

	rsp->offset = 0;
	rsp->flags = flags;
	rsp->id = req->id;
	rsp->status = (s16)offset;
}

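/*
 * Emit one pending extra info segment into the ring slot, chaining it
 * to any further extras via XEN_NETIF_EXTRA_FLAG_MORE.
 */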
static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 struct xen_netif_rx_request *req,
				 struct xen_netif_rx_response *rsp)
{
	struct xen_netif_extra_info *extra = (void *)rsp;
	unsigned int i;

	pkt->extra_count--;

	for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
		if (pkt->extras[i].type) {
			*extra = pkt->extras[i];

			if (pkt->extra_count != 0)
				extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;

			pkt->extras[i].type = 0;
			return;
		}
	}
	BUG();
}

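/*
 * Transcribe one complete skb into the rx ring, one slot at a time,
 * interleaving extra info slots after the first data slot.
 */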
static void xenvif_rx_skb(struct xenvif_queue *queue)
{
	struct xenvif_pkt_state pkt;

	xenvif_rx_next_skb(queue, &pkt);

	queue->last_rx_time = jiffies;

	do {
		struct xen_netif_rx_request *req;
		struct xen_netif_rx_response *rsp;

		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
		rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);

		/* Extras must go after the first data slot */
		if (pkt.slot != 0 && pkt.extra_count != 0)
			xenvif_rx_extra_slot(queue, &pkt, req, rsp);
		else
			xenvif_rx_data_slot(queue, &pkt, req, rsp);

		queue->rx.req_cons++;
		pkt.slot++;
	} while (pkt.remaining_len > 0 || pkt.extra_count != 0);

	xenvif_rx_complete(queue, &pkt);
}

#define RX_BATCH_SIZE 64

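/*
 * Main rx processing loop: transcribe queued skbs into the ring while
 * the frontend has slots available, bounded by RX_BATCH_SIZE, then
 * flush the grant copies and complete the skbs in one go.
 */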
static void xenvif_rx_action(struct xenvif_queue *queue)
{
	struct sk_buff_head completed_skbs;
	unsigned int work_done = 0;

	__skb_queue_head_init(&completed_skbs);
	queue->rx_copy.completed = &completed_skbs;

	while (xenvif_rx_ring_slots_available(queue) &&
	       !skb_queue_empty(&queue->rx_queue) &&
	       work_done < RX_BATCH_SIZE) {
		xenvif_rx_skb(queue);
		work_done++;
	}

	/* Flush any pending copies and complete all skbs. */
	xenvif_rx_copy_flush(queue);
}

static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return prod - cons;
}

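/*
 * A queue is considered stalled if the frontend has not provided enough
 * rx slots for the head of the queue within the stall timeout; it
 * becomes ready again once enough slots are available.
 */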
static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
{
	unsigned int needed = READ_ONCE(queue->rx_slots_needed);

	return !queue->stalled &&
		xenvif_rx_queue_slots(queue) < needed &&
		time_after(jiffies,
			   queue->last_rx_time + queue->vif->stall_timeout);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
	unsigned int needed = READ_ONCE(queue->rx_slots_needed);

	return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
}

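/*
 * Return true if there is work for the rx path: ring slots available,
 * a stall state change to handle, a stop request (when called from the
 * kthread itself) or a disabled frontend.
 */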
bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
{
	return xenvif_rx_ring_slots_available(queue) ||
		(queue->vif->stall_timeout &&
		 (xenvif_rx_queue_stalled(queue) ||
		  xenvif_rx_queue_ready(queue))) ||
		(test_kthread && kthread_should_stop()) ||
		queue->vif->disabled;
}

static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
	struct sk_buff *skb;
	long timeout;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
	return timeout < 0 ? 0 : timeout;
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning). In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
	DEFINE_WAIT(wait);

	if (xenvif_have_rx_work(queue, true))
		return;

	for (;;) {
		long ret;

		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
		if (xenvif_have_rx_work(queue, true))
			break;
		if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
					&queue->eoi_pending) &
		    (NETBK_RX_EOI | NETBK_COMMON_EOI))
			xen_irq_lateeoi(queue->rx_irq, 0);

		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
		if (!ret)
			break;
	}
	finish_wait(&queue->wq, &wait);
}

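/*
 * Stall handling: the carrier is dropped when the first queue stalls
 * and only re-asserted once every queue is ready again, so a single
 * unresponsive frontend queue takes the carrier down for the whole
 * interface.
 */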
static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->stalled = true;

	/* At least one queue has stalled? Disable the carrier. */
	spin_lock(&vif->lock);
	if (vif->stalled_queues++ == 0) {
		netdev_info(vif->dev, "Guest Rx stalled");
		netif_carrier_off(vif->dev);
	}
	spin_unlock(&vif->lock);
}

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
	queue->stalled = false;

	/* All queues are ready? Enable the carrier. */
	spin_lock(&vif->lock);
	if (--vif->stalled_queues == 0) {
		netdev_info(vif->dev, "Guest Rx ready");
		netif_carrier_on(vif->dev);
	}
	spin_unlock(&vif->lock);
}

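/*
 * Per-queue rx kthread: sleep until there is work, then transcribe
 * queued packets to the frontend, maintain the carrier based on stall
 * detection, and drop packets that have sat in the queue too long.
 */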
int xenvif_kthread_guest_rx(void *data)
{
	struct xenvif_queue *queue = data;
	struct xenvif *vif = queue->vif;

	if (!vif->stall_timeout)
		xenvif_queue_carrier_on(queue);

	for (;;) {
		xenvif_wait_for_rx_work(queue);

		if (kthread_should_stop())
			break;

		/* The frontend has been found to be rogue, so disable
		 * it in kthread context. Currently this is only set
		 * when netback finds out that the frontend has sent a
		 * malformed packet, but we cannot disable the
		 * interface in softirq context, so we defer it here,
		 * if this thread is associated with queue 0.
		 */
		if (unlikely(vif->disabled && queue->id == 0)) {
			xenvif_carrier_off(vif);
			break;
		}

		if (!skb_queue_empty(&queue->rx_queue))
			xenvif_rx_action(queue);

		/* If the guest hasn't provided any Rx slots for a
		 * while it's probably not responsive, drop the
		 * carrier so packets are dropped earlier.
		 */
		if (vif->stall_timeout) {
			if (xenvif_rx_queue_stalled(queue))
				xenvif_queue_carrier_off(queue);
			else if (xenvif_rx_queue_ready(queue))
				xenvif_queue_carrier_on(queue);
		}

		/* Queued packets may have foreign pages from other
		 * domains. These cannot be queued indefinitely as
		 * this would starve guests of grant refs and transmit
		 * slots.
		 */
		xenvif_rx_queue_drop_expired(queue);

		cond_resched();
	}

	/* Bin any remaining skbs */
	xenvif_rx_queue_purge(queue);

	return 0;
}