1f942dc25SIan Campbell /*
2f942dc25SIan Campbell * Back-end of the driver for virtual network devices. This portion of the
3f942dc25SIan Campbell * driver exports a 'unified' network-device interface that can be accessed
4f942dc25SIan Campbell * by any operating system that implements a compatible front end. A
5f942dc25SIan Campbell * reference front-end implementation can be found in:
6f942dc25SIan Campbell * drivers/net/xen-netfront.c
7f942dc25SIan Campbell *
8f942dc25SIan Campbell * Copyright (c) 2002-2005, K A Fraser
9f942dc25SIan Campbell *
10f942dc25SIan Campbell * This program is free software; you can redistribute it and/or
11f942dc25SIan Campbell * modify it under the terms of the GNU General Public License version 2
12f942dc25SIan Campbell * as published by the Free Software Foundation; or, when distributed
13f942dc25SIan Campbell * separately from the Linux kernel or incorporated into other
14f942dc25SIan Campbell * software packages, subject to the following license:
15f942dc25SIan Campbell *
16f942dc25SIan Campbell * Permission is hereby granted, free of charge, to any person obtaining a copy
17f942dc25SIan Campbell * of this source file (the "Software"), to deal in the Software without
18f942dc25SIan Campbell * restriction, including without limitation the rights to use, copy, modify,
19f942dc25SIan Campbell * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20f942dc25SIan Campbell * and to permit persons to whom the Software is furnished to do so, subject to
21f942dc25SIan Campbell * the following conditions:
22f942dc25SIan Campbell *
23f942dc25SIan Campbell * The above copyright notice and this permission notice shall be included in
24f942dc25SIan Campbell * all copies or substantial portions of the Software.
25f942dc25SIan Campbell *
26f942dc25SIan Campbell * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27f942dc25SIan Campbell * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28f942dc25SIan Campbell * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29f942dc25SIan Campbell * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30f942dc25SIan Campbell * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31f942dc25SIan Campbell * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32f942dc25SIan Campbell * IN THE SOFTWARE.
33f942dc25SIan Campbell */
34f942dc25SIan Campbell
35f942dc25SIan Campbell #include "common.h"
36f942dc25SIan Campbell
37f942dc25SIan Campbell #include <linux/kthread.h>
38f942dc25SIan Campbell #include <linux/if_vlan.h>
39f942dc25SIan Campbell #include <linux/udp.h>
40e3377f36SZoltan Kiss #include <linux/highmem.h>
41*f6d827b1SMina Almasry #include <linux/skbuff_ref.h>
42f942dc25SIan Campbell
43f942dc25SIan Campbell #include <net/tcp.h>
44f942dc25SIan Campbell
45ca981633SStefano Stabellini #include <xen/xen.h>
46f942dc25SIan Campbell #include <xen/events.h>
47f942dc25SIan Campbell #include <xen/interface/memory.h>
48a9fd60e2SJulien Grall #include <xen/page.h>
49f942dc25SIan Campbell
50f942dc25SIan Campbell #include <asm/xen/hypercall.h>
51f942dc25SIan Campbell
52e1f00a69SWei Liu /* Provide an option to disable split event channels at load time as
53e1f00a69SWei Liu * event channels are a limited resource. Split event channels are
54e1f00a69SWei Liu * enabled by default.
55e1f00a69SWei Liu */
56c489dbb1SShailendra Verma bool separate_tx_rx_irq = true;
57e1f00a69SWei Liu module_param(separate_tx_rx_irq, bool, 0644);
58e1f00a69SWei Liu
59f48da8b1SDavid Vrabel /* The time that packets can stay on the guest Rx internal queue
60f48da8b1SDavid Vrabel * before they are dropped.
6109350788SZoltan Kiss */
6209350788SZoltan Kiss unsigned int rx_drain_timeout_msecs = 10000;
6309350788SZoltan Kiss module_param(rx_drain_timeout_msecs, uint, 0444);
6409350788SZoltan Kiss
65ecf08d2dSDavid Vrabel /* The length of time before the frontend is considered unresponsive
66ecf08d2dSDavid Vrabel * because it isn't providing Rx slots.
67ecf08d2dSDavid Vrabel */
6826c0e102SDavid Vrabel unsigned int rx_stall_timeout_msecs = 60000;
69ecf08d2dSDavid Vrabel module_param(rx_stall_timeout_msecs, uint, 0444);
70ecf08d2dSDavid Vrabel
7156dd5af9SJuergen Gross #define MAX_QUEUES_DEFAULT 8
728d3d53b3SAndrew J. Bennieston unsigned int xenvif_max_queues;
738d3d53b3SAndrew J. Bennieston module_param_named(max_queues, xenvif_max_queues, uint, 0644);
748d3d53b3SAndrew J. Bennieston MODULE_PARM_DESC(max_queues,
758d3d53b3SAndrew J. Bennieston "Maximum number of queues per virtual interface");
768d3d53b3SAndrew J. Bennieston
772810e5b9SWei Liu /*
782810e5b9SWei Liu * This is the maximum number of slots an skb can have. If a guest sends
792810e5b9SWei Liu * an skb that exceeds this limit, it is considered malicious.
802810e5b9SWei Liu */
8137641494SWei Liu #define FATAL_SKB_SLOTS_DEFAULT 20
8237641494SWei Liu static unsigned int fatal_skb_slots = FATAL_SKB_SLOTS_DEFAULT;
8337641494SWei Liu module_param(fatal_skb_slots, uint, 0444);
8437641494SWei Liu
857e5d7753SMalcolm Crossley /* The amount to copy out of the first guest Tx slot into the skb's
867e5d7753SMalcolm Crossley * linear area. If the first slot has more data, it will be mapped
877e5d7753SMalcolm Crossley * and put into the first frag.
887e5d7753SMalcolm Crossley *
897e5d7753SMalcolm Crossley * This is sized to avoid pulling headers from the frags for most
907e5d7753SMalcolm Crossley * TCP/IP packets.
917e5d7753SMalcolm Crossley */
927e5d7753SMalcolm Crossley #define XEN_NETBACK_TX_COPY_LEN 128
937e5d7753SMalcolm Crossley
9440d8abdeSPaul Durrant /* This is the maximum number of flows in the hash cache. */
9540d8abdeSPaul Durrant #define XENVIF_HASH_CACHE_SIZE_DEFAULT 64
9640d8abdeSPaul Durrant unsigned int xenvif_hash_cache_size = XENVIF_HASH_CACHE_SIZE_DEFAULT;
9740d8abdeSPaul Durrant module_param_named(hash_cache_size, xenvif_hash_cache_size, uint, 0644);
9840d8abdeSPaul Durrant MODULE_PARM_DESC(hash_cache_size, "Number of flows in the hash cache");
997e5d7753SMalcolm Crossley
1001c9535c7SDenis Kirjanov /* This module parameter indicates that the data handed to xen-netfront
1011c9535c7SDenis Kirjanov * is placed at the XDP_PACKET_HEADROOM offset needed for XDP
1021c9535c7SDenis Kirjanov * processing.
1031c9535c7SDenis Kirjanov */
1041c9535c7SDenis Kirjanov bool provides_xdp_headroom = true;
1051c9535c7SDenis Kirjanov module_param(provides_xdp_headroom, bool, 0644);
1061c9535c7SDenis Kirjanov
107e9ce7cb6SWei Liu static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
1087b55984cSJan Beulich s8 status);
1097376419aSWei Liu
110e9ce7cb6SWei Liu static void make_tx_response(struct xenvif_queue *queue,
1117b55984cSJan Beulich const struct xen_netif_tx_request *txp,
112562abd39SPaul Durrant unsigned int extra_count,
1137b55984cSJan Beulich s8 status);
114b3f980bdSWei Liu
1155834e72eSJuergen Gross static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
1165834e72eSJuergen Gross
117e9ce7cb6SWei Liu static inline int tx_work_todo(struct xenvif_queue *queue);
118b3f980bdSWei Liu
119e9ce7cb6SWei Liu static inline unsigned long idx_to_pfn(struct xenvif_queue *queue,
120ea066ad1SIan Campbell u16 idx)
121f942dc25SIan Campbell {
122e9ce7cb6SWei Liu return page_to_pfn(queue->mmap_pages[idx]);
123f942dc25SIan Campbell }
124f942dc25SIan Campbell
125e9ce7cb6SWei Liu static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue,
126ea066ad1SIan Campbell u16 idx)
127f942dc25SIan Campbell {
128e9ce7cb6SWei Liu return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx));
129f942dc25SIan Campbell }
130f942dc25SIan Campbell
1317aceb47aSZoltan Kiss #define callback_param(vif, pending_idx) \
1327aceb47aSZoltan Kiss (vif->pending_tx_info[pending_idx].callback_struct)
1337aceb47aSZoltan Kiss
134f53c3fe8SZoltan Kiss /* Find the containing VIF's structure from a pointer in pending_tx_info array
135f53c3fe8SZoltan Kiss */
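/* The ubuf_info_msgzc is embedded in pending_tx_info[pending_idx], so
 * stepping back pending_idx elements from the entry containing it lands on
 * pending_tx_info[0], from which the enclosing queue can be recovered.
 */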
136b63ca3e8SPavel Begunkov static inline struct xenvif_queue *ubuf_to_queue(const struct ubuf_info_msgzc *ubuf)
1373e2234b3SZoltan Kiss {
138f53c3fe8SZoltan Kiss u16 pending_idx = ubuf->desc;
139f53c3fe8SZoltan Kiss struct pending_tx_info *temp =
140f53c3fe8SZoltan Kiss container_of(ubuf, struct pending_tx_info, callback_struct);
141f53c3fe8SZoltan Kiss return container_of(temp - pending_idx,
142e9ce7cb6SWei Liu struct xenvif_queue,
143f53c3fe8SZoltan Kiss pending_tx_info[0]);
1443e2234b3SZoltan Kiss }
145f53c3fe8SZoltan Kiss
146ea066ad1SIan Campbell static u16 frag_get_pending_idx(skb_frag_t *frag)
147ea066ad1SIan Campbell {
148b54c9d5bSJonathan Lemon return (u16)skb_frag_off(frag);
149ea066ad1SIan Campbell }
150ea066ad1SIan Campbell
151ea066ad1SIan Campbell static void frag_set_pending_idx(skb_frag_t *frag, u16 pending_idx)
152ea066ad1SIan Campbell {
153b54c9d5bSJonathan Lemon skb_frag_off_set(frag, pending_idx);
154ea066ad1SIan Campbell }
155ea066ad1SIan Campbell
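/* Turn a free-running pending ring counter into an array index.  The mask
 * relies on MAX_PENDING_REQS being a power of two.
 */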
156f942dc25SIan Campbell static inline pending_ring_idx_t pending_index(unsigned i)
157f942dc25SIan Campbell {
158f942dc25SIan Campbell return i & (MAX_PENDING_REQS-1);
159f942dc25SIan Campbell }
160f942dc25SIan Campbell
161e9ce7cb6SWei Liu void xenvif_kick_thread(struct xenvif_queue *queue)
162f942dc25SIan Campbell {
163e9ce7cb6SWei Liu wake_up(&queue->wq);
164b3f980bdSWei Liu }
165b3f980bdSWei Liu
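/* Final check after tx processing: reschedule NAPI if new requests have
 * arrived, otherwise signal end-of-interrupt so the tx event channel can
 * fire again.
 */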
166e9ce7cb6SWei Liu void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue)
167f942dc25SIan Campbell {
168f942dc25SIan Campbell int more_to_do;
169f942dc25SIan Campbell
170e9ce7cb6SWei Liu RING_FINAL_CHECK_FOR_REQUESTS(&queue->tx, more_to_do);
171f942dc25SIan Campbell
172f942dc25SIan Campbell if (more_to_do)
173e9ce7cb6SWei Liu napi_schedule(&queue->napi);
17423025393SJuergen Gross else if (atomic_fetch_andnot(NETBK_TX_EOI | NETBK_COMMON_EOI,
17523025393SJuergen Gross &queue->eoi_pending) &
17623025393SJuergen Gross (NETBK_TX_EOI | NETBK_COMMON_EOI))
17723025393SJuergen Gross xen_irq_lateeoi(queue->tx_irq, 0);
178f942dc25SIan Campbell }
179f942dc25SIan Campbell
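/* Replenish a queue's transmit credit at the start of a new credit window.
 * Called from the credit timer callback and from tx_credit_exceeded().
 */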
180e9ce7cb6SWei Liu static void tx_add_credit(struct xenvif_queue *queue)
181f942dc25SIan Campbell {
182f942dc25SIan Campbell unsigned long max_burst, max_credit;
183f942dc25SIan Campbell
184f942dc25SIan Campbell /*
185f942dc25SIan Campbell * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
186f942dc25SIan Campbell * Otherwise the interface can seize up due to insufficient credit.
187f942dc25SIan Campbell */
1880f589967SDavid Vrabel max_burst = max(131072UL, queue->credit_bytes);
189f942dc25SIan Campbell
190f942dc25SIan Campbell /* Take care that adding a new chunk of credit doesn't wrap to zero. */
191e9ce7cb6SWei Liu max_credit = queue->remaining_credit + queue->credit_bytes;
192e9ce7cb6SWei Liu if (max_credit < queue->remaining_credit)
193f942dc25SIan Campbell max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
194f942dc25SIan Campbell
195e9ce7cb6SWei Liu queue->remaining_credit = min(max_credit, max_burst);
196dfa523aeSWei Liu queue->rate_limited = false;
197f942dc25SIan Campbell }
198f942dc25SIan Campbell
199cac6a8f9SKees Cook void xenvif_tx_credit_callback(struct timer_list *t)
200f942dc25SIan Campbell {
201cac6a8f9SKees Cook struct xenvif_queue *queue = from_timer(queue, t, credit_timeout);
202e9ce7cb6SWei Liu tx_add_credit(queue);
203e9ce7cb6SWei Liu xenvif_napi_schedule_or_enable_events(queue);
204f942dc25SIan Campbell }
205f942dc25SIan Campbell
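/* Fail a chain of tx requests: respond to the given request and every
 * following slot up to 'end' with XEN_NETIF_RSP_ERROR, then advance the
 * ring consumer past them.
 */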
206e9ce7cb6SWei Liu static void xenvif_tx_err(struct xenvif_queue *queue,
207562abd39SPaul Durrant struct xen_netif_tx_request *txp,
208562abd39SPaul Durrant unsigned int extra_count, RING_IDX end)
209f942dc25SIan Campbell {
210e9ce7cb6SWei Liu RING_IDX cons = queue->tx.req_cons;
211f942dc25SIan Campbell
212f942dc25SIan Campbell do {
213562abd39SPaul Durrant make_tx_response(queue, txp, extra_count, XEN_NETIF_RSP_ERROR);
214b9149729SIan Campbell if (cons == end)
215f942dc25SIan Campbell break;
21668a33bfdSDavid Vrabel RING_COPY_REQUEST(&queue->tx, cons++, txp);
21772eec92aSPaul Durrant extra_count = 0; /* only the first frag can have extras */
218f942dc25SIan Campbell } while (1);
219e9ce7cb6SWei Liu queue->tx.req_cons = cons;
220f942dc25SIan Campbell }
221f942dc25SIan Campbell
2227376419aSWei Liu static void xenvif_fatal_tx_err(struct xenvif *vif)
22348856286SIan Campbell {
22448856286SIan Campbell netdev_err(vif->dev, "fatal error; disabling device\n");
225e9d8b2c2SWei Liu vif->disabled = true;
226e9ce7cb6SWei Liu /* Disable the vif from queue 0's kthread */
227b17075d5SIgor Druzhinin if (vif->num_queues)
228e9ce7cb6SWei Liu xenvif_kick_thread(&vif->queues[0]);
22948856286SIan Campbell }
23048856286SIan Campbell
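/* Walk the XEN_NETTXF_more_data chain following 'first', copying the slots
 * into 'txp' and validating their number, sizes and offsets.  Returns the
 * number of extra slots used, or a negative error if the frontend is
 * misbehaving (the packet is dropped or the vif disabled as appropriate).
 */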
231e9ce7cb6SWei Liu static int xenvif_count_requests(struct xenvif_queue *queue,
232f942dc25SIan Campbell struct xen_netif_tx_request *first,
233562abd39SPaul Durrant unsigned int extra_count,
234f942dc25SIan Campbell struct xen_netif_tx_request *txp,
235f942dc25SIan Campbell int work_to_do)
236f942dc25SIan Campbell {
237e9ce7cb6SWei Liu RING_IDX cons = queue->tx.req_cons;
2382810e5b9SWei Liu int slots = 0;
2392810e5b9SWei Liu int drop_err = 0;
24059ccb4ebSWei Liu int more_data;
241f942dc25SIan Campbell
242f942dc25SIan Campbell if (!(first->flags & XEN_NETTXF_more_data))
243f942dc25SIan Campbell return 0;
244f942dc25SIan Campbell
245f942dc25SIan Campbell do {
24659ccb4ebSWei Liu struct xen_netif_tx_request dropped_tx = { 0 };
24759ccb4ebSWei Liu
2482810e5b9SWei Liu if (slots >= work_to_do) {
249e9ce7cb6SWei Liu netdev_err(queue->vif->dev,
2502810e5b9SWei Liu "Asked for %d slots but exceeds this limit\n",
2512810e5b9SWei Liu work_to_do);
252e9ce7cb6SWei Liu xenvif_fatal_tx_err(queue->vif);
25335876b5fSDavid Vrabel return -ENODATA;
254f942dc25SIan Campbell }
255f942dc25SIan Campbell
2562810e5b9SWei Liu /* This guest is really using too many slots and is
2572810e5b9SWei Liu * considered malicious.
2582810e5b9SWei Liu */
25937641494SWei Liu if (unlikely(slots >= fatal_skb_slots)) {
260e9ce7cb6SWei Liu netdev_err(queue->vif->dev,
2612810e5b9SWei Liu "Malicious frontend using %d slots, threshold %u\n",
26237641494SWei Liu slots, fatal_skb_slots);
263e9ce7cb6SWei Liu xenvif_fatal_tx_err(queue->vif);
26435876b5fSDavid Vrabel return -E2BIG;
265f942dc25SIan Campbell }
266f942dc25SIan Campbell
2672810e5b9SWei Liu /* The Xen network protocol had an implicit dependency on
26837641494SWei Liu * MAX_SKB_FRAGS. XEN_NETBK_LEGACY_SLOTS_MAX is set to
26937641494SWei Liu * the historical MAX_SKB_FRAGS value 18 to honor the
27037641494SWei Liu * same behavior as before. Any packet using more than
27137641494SWei Liu * 18 slots but fewer than fatal_skb_slots slots is
27237641494SWei Liu * dropped.
2732810e5b9SWei Liu */
27437641494SWei Liu if (!drop_err && slots >= XEN_NETBK_LEGACY_SLOTS_MAX) {
2752810e5b9SWei Liu if (net_ratelimit())
276e9ce7cb6SWei Liu netdev_dbg(queue->vif->dev,
2772810e5b9SWei Liu "Too many slots (%d) exceeding limit (%d), dropping packet\n",
27837641494SWei Liu slots, XEN_NETBK_LEGACY_SLOTS_MAX);
2792810e5b9SWei Liu drop_err = -E2BIG;
2802810e5b9SWei Liu }
2812810e5b9SWei Liu
28259ccb4ebSWei Liu if (drop_err)
28359ccb4ebSWei Liu txp = &dropped_tx;
28459ccb4ebSWei Liu
28568a33bfdSDavid Vrabel RING_COPY_REQUEST(&queue->tx, cons + slots, txp);
28603393fd5SWei Liu
28703393fd5SWei Liu /* If the guest submitted a frame >= 64 KiB then
28803393fd5SWei Liu * first->size overflowed and following slots will
28903393fd5SWei Liu * appear to be larger than the frame.
29003393fd5SWei Liu *
29103393fd5SWei Liu * This cannot be fatal error as there are buggy
29203393fd5SWei Liu * frontends that do this.
29303393fd5SWei Liu *
29403393fd5SWei Liu * Consume all slots and drop the packet.
29503393fd5SWei Liu */
29603393fd5SWei Liu if (!drop_err && txp->size > first->size) {
29703393fd5SWei Liu if (net_ratelimit())
298e9ce7cb6SWei Liu netdev_dbg(queue->vif->dev,
2992810e5b9SWei Liu "Invalid tx request, slot size %u > remaining size %u\n",
3002810e5b9SWei Liu txp->size, first->size);
30103393fd5SWei Liu drop_err = -EIO;
302f942dc25SIan Campbell }
303f942dc25SIan Campbell
304f942dc25SIan Campbell first->size -= txp->size;
3052810e5b9SWei Liu slots++;
306f942dc25SIan Campbell
307d0089e8aSJulien Grall if (unlikely((txp->offset + txp->size) > XEN_PAGE_SIZE)) {
30868946159SJulien Grall netdev_err(queue->vif->dev, "Cross page boundary, txp->offset: %u, size: %u\n",
309f942dc25SIan Campbell txp->offset, txp->size);
310e9ce7cb6SWei Liu xenvif_fatal_tx_err(queue->vif);
31135876b5fSDavid Vrabel return -EINVAL;
312f942dc25SIan Campbell }
31359ccb4ebSWei Liu
31459ccb4ebSWei Liu more_data = txp->flags & XEN_NETTXF_more_data;
31559ccb4ebSWei Liu
31659ccb4ebSWei Liu if (!drop_err)
31759ccb4ebSWei Liu txp++;
31859ccb4ebSWei Liu
31959ccb4ebSWei Liu } while (more_data);
3202810e5b9SWei Liu
3212810e5b9SWei Liu if (drop_err) {
322562abd39SPaul Durrant xenvif_tx_err(queue, first, extra_count, cons + slots);
3232810e5b9SWei Liu return drop_err;
3242810e5b9SWei Liu }
3252810e5b9SWei Liu
3262810e5b9SWei Liu return slots;
327f942dc25SIan Campbell }
328f942dc25SIan Campbell
3298f13dd96SZoltan Kiss
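/* Per-skb state used while building tx operations: copy_pending_idx[]
 * records the pending ring entries backing the grant copies into the
 * linear area, copy_count is how many of them are in use, and split_mask
 * marks copies that were split across a local page boundary (both halves
 * then share one pending entry).
 */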
3308f13dd96SZoltan Kiss struct xenvif_tx_cb {
331ad7f402aSRoss Lagerwall u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
332ad7f402aSRoss Lagerwall u8 copy_count;
33305310f31SJuergen Gross u32 split_mask;
3348f13dd96SZoltan Kiss };
3358f13dd96SZoltan Kiss
3368f13dd96SZoltan Kiss #define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
337ad7f402aSRoss Lagerwall #define copy_pending_idx(skb, i) (XENVIF_TX_CB(skb)->copy_pending_idx[i])
338ad7f402aSRoss Lagerwall #define copy_count(skb) (XENVIF_TX_CB(skb)->copy_count)
3398f13dd96SZoltan Kiss
340e9ce7cb6SWei Liu static inline void xenvif_tx_create_map_op(struct xenvif_queue *queue,
341f53c3fe8SZoltan Kiss u16 pending_idx,
342f53c3fe8SZoltan Kiss struct xen_netif_tx_request *txp,
343562abd39SPaul Durrant unsigned int extra_count,
3449074ce24SZoltan Kiss struct gnttab_map_grant_ref *mop)
345f53c3fe8SZoltan Kiss {
346e9ce7cb6SWei Liu queue->pages_to_map[mop-queue->tx_map_ops] = queue->mmap_pages[pending_idx];
347e9ce7cb6SWei Liu gnttab_set_map_op(mop, idx_to_kaddr(queue, pending_idx),
348f53c3fe8SZoltan Kiss GNTMAP_host_map | GNTMAP_readonly,
349e9ce7cb6SWei Liu txp->gref, queue->vif->domid);
350f53c3fe8SZoltan Kiss
351e9ce7cb6SWei Liu memcpy(&queue->pending_tx_info[pending_idx].req, txp,
352f53c3fe8SZoltan Kiss sizeof(*txp));
353562abd39SPaul Durrant queue->pending_tx_info[pending_idx].extra_count = extra_count;
354f53c3fe8SZoltan Kiss }
355f53c3fe8SZoltan Kiss
356e3377f36SZoltan Kiss static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
357e3377f36SZoltan Kiss {
358e3377f36SZoltan Kiss struct sk_buff *skb =
359e3377f36SZoltan Kiss alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
360e3377f36SZoltan Kiss GFP_ATOMIC | __GFP_NOWARN);
36105310f31SJuergen Gross
36205310f31SJuergen Gross BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
363e3377f36SZoltan Kiss if (unlikely(skb == NULL))
364e3377f36SZoltan Kiss return NULL;
365e3377f36SZoltan Kiss
366e3377f36SZoltan Kiss /* Packets passed to netif_rx() must have some headroom. */
367e3377f36SZoltan Kiss skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
368e3377f36SZoltan Kiss
369e3377f36SZoltan Kiss /* Initialize it here to avoid later surprises */
370e3377f36SZoltan Kiss skb_shinfo(skb)->destructor_arg = NULL;
371e3377f36SZoltan Kiss
372e3377f36SZoltan Kiss return skb;
373e3377f36SZoltan Kiss }
374e3377f36SZoltan Kiss
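/* Build the grant operations for one packet: grant copies for the first
 * data_len bytes into the skb's linear area, then grant maps for the
 * remaining slots as frags.  Slots beyond MAX_SKB_FRAGS spill into the
 * pre-allocated frag_list skb (nskb); an unused nskb is freed here.
 */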
375ad7f402aSRoss Lagerwall static void xenvif_get_requests(struct xenvif_queue *queue,
376f942dc25SIan Campbell struct sk_buff *skb,
377ad7f402aSRoss Lagerwall struct xen_netif_tx_request *first,
378ad7f402aSRoss Lagerwall struct xen_netif_tx_request *txfrags,
379ad7f402aSRoss Lagerwall unsigned *copy_ops,
380ad7f402aSRoss Lagerwall unsigned *map_ops,
3812475b225SRoss Lagerwall unsigned int frag_overflow,
382ad7f402aSRoss Lagerwall struct sk_buff *nskb,
383ad7f402aSRoss Lagerwall unsigned int extra_count,
384ad7f402aSRoss Lagerwall unsigned int data_len)
385f942dc25SIan Campbell {
386f942dc25SIan Campbell struct skb_shared_info *shinfo = skb_shinfo(skb);
387f942dc25SIan Campbell skb_frag_t *frags = shinfo->frags;
388ad7f402aSRoss Lagerwall u16 pending_idx;
38962bad319SZoltan Kiss pending_ring_idx_t index;
3902475b225SRoss Lagerwall unsigned int nr_slots;
391ad7f402aSRoss Lagerwall struct gnttab_copy *cop = queue->tx_copy_ops + *copy_ops;
392ad7f402aSRoss Lagerwall struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops;
393ad7f402aSRoss Lagerwall struct xen_netif_tx_request *txp = first;
3942810e5b9SWei Liu
395534fc31dSRoss Lagerwall nr_slots = shinfo->nr_frags + frag_overflow + 1;
396f942dc25SIan Campbell
397ad7f402aSRoss Lagerwall copy_count(skb) = 0;
39805310f31SJuergen Gross XENVIF_TX_CB(skb)->split_mask = 0;
399f942dc25SIan Campbell
400ad7f402aSRoss Lagerwall /* Create copy ops for exactly data_len bytes into the skb head. */
401ad7f402aSRoss Lagerwall __skb_put(skb, data_len);
402ad7f402aSRoss Lagerwall while (data_len > 0) {
403ad7f402aSRoss Lagerwall int amount = data_len > txp->size ? txp->size : data_len;
40405310f31SJuergen Gross bool split = false;
405ad7f402aSRoss Lagerwall
406ad7f402aSRoss Lagerwall cop->source.u.ref = txp->gref;
407ad7f402aSRoss Lagerwall cop->source.domid = queue->vif->domid;
408ad7f402aSRoss Lagerwall cop->source.offset = txp->offset;
409ad7f402aSRoss Lagerwall
410ad7f402aSRoss Lagerwall cop->dest.domid = DOMID_SELF;
411ad7f402aSRoss Lagerwall cop->dest.offset = (offset_in_page(skb->data +
412ad7f402aSRoss Lagerwall skb_headlen(skb) -
413ad7f402aSRoss Lagerwall data_len)) & ~XEN_PAGE_MASK;
414ad7f402aSRoss Lagerwall cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
415ad7f402aSRoss Lagerwall - data_len);
416ad7f402aSRoss Lagerwall
41705310f31SJuergen Gross /* Don't cross local page boundary! */
41805310f31SJuergen Gross if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
41905310f31SJuergen Gross amount = XEN_PAGE_SIZE - cop->dest.offset;
42005310f31SJuergen Gross XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
42105310f31SJuergen Gross split = true;
42205310f31SJuergen Gross }
42305310f31SJuergen Gross
424ad7f402aSRoss Lagerwall cop->len = amount;
425ad7f402aSRoss Lagerwall cop->flags = GNTCOPY_source_gref;
426ad7f402aSRoss Lagerwall
427ad7f402aSRoss Lagerwall index = pending_index(queue->pending_cons);
428ad7f402aSRoss Lagerwall pending_idx = queue->pending_ring[index];
429ad7f402aSRoss Lagerwall callback_param(queue, pending_idx).ctx = NULL;
430ad7f402aSRoss Lagerwall copy_pending_idx(skb, copy_count(skb)) = pending_idx;
43105310f31SJuergen Gross if (!split)
432ad7f402aSRoss Lagerwall copy_count(skb)++;
433ad7f402aSRoss Lagerwall
434ad7f402aSRoss Lagerwall cop++;
435ad7f402aSRoss Lagerwall data_len -= amount;
436ad7f402aSRoss Lagerwall
437ad7f402aSRoss Lagerwall if (amount == txp->size) {
438ad7f402aSRoss Lagerwall /* The copy op covered the full tx_request */
439ad7f402aSRoss Lagerwall
440ad7f402aSRoss Lagerwall memcpy(&queue->pending_tx_info[pending_idx].req,
441ad7f402aSRoss Lagerwall txp, sizeof(*txp));
442ad7f402aSRoss Lagerwall queue->pending_tx_info[pending_idx].extra_count =
443ad7f402aSRoss Lagerwall (txp == first) ? extra_count : 0;
444ad7f402aSRoss Lagerwall
445ad7f402aSRoss Lagerwall if (txp == first)
446ad7f402aSRoss Lagerwall txp = txfrags;
447ad7f402aSRoss Lagerwall else
448ad7f402aSRoss Lagerwall txp++;
449ad7f402aSRoss Lagerwall queue->pending_cons++;
450ad7f402aSRoss Lagerwall nr_slots--;
451ad7f402aSRoss Lagerwall } else {
452ad7f402aSRoss Lagerwall /* The copy op partially covered the tx_request.
45305310f31SJuergen Gross * The remainder will be mapped or copied in the next
45405310f31SJuergen Gross * iteration.
455ad7f402aSRoss Lagerwall */
456ad7f402aSRoss Lagerwall txp->offset += amount;
457ad7f402aSRoss Lagerwall txp->size -= amount;
458ad7f402aSRoss Lagerwall }
459ad7f402aSRoss Lagerwall }
460ad7f402aSRoss Lagerwall
461534fc31dSRoss Lagerwall for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
462c7ec4f2dSJan Beulich nr_slots--) {
463c7ec4f2dSJan Beulich if (unlikely(!txp->size)) {
464c7ec4f2dSJan Beulich make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
465c7ec4f2dSJan Beulich ++txp;
466c7ec4f2dSJan Beulich continue;
467c7ec4f2dSJan Beulich }
468c7ec4f2dSJan Beulich
469e9ce7cb6SWei Liu index = pending_index(queue->pending_cons++);
470e9ce7cb6SWei Liu pending_idx = queue->pending_ring[index];
471ad7f402aSRoss Lagerwall xenvif_tx_create_map_op(queue, pending_idx, txp,
472ad7f402aSRoss Lagerwall txp == first ? extra_count : 0, gop);
473f53c3fe8SZoltan Kiss frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
474c7ec4f2dSJan Beulich ++shinfo->nr_frags;
475c7ec4f2dSJan Beulich ++gop;
476ad7f402aSRoss Lagerwall
477ad7f402aSRoss Lagerwall if (txp == first)
478ad7f402aSRoss Lagerwall txp = txfrags;
479ad7f402aSRoss Lagerwall else
480ad7f402aSRoss Lagerwall txp++;
4812810e5b9SWei Liu }
4822810e5b9SWei Liu
483534fc31dSRoss Lagerwall if (nr_slots > 0) {
484e3377f36SZoltan Kiss
485e3377f36SZoltan Kiss shinfo = skb_shinfo(nskb);
486e3377f36SZoltan Kiss frags = shinfo->frags;
487e3377f36SZoltan Kiss
488c7ec4f2dSJan Beulich for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
489c7ec4f2dSJan Beulich if (unlikely(!txp->size)) {
490c7ec4f2dSJan Beulich make_tx_response(queue, txp, 0,
491c7ec4f2dSJan Beulich XEN_NETIF_RSP_OKAY);
492c7ec4f2dSJan Beulich continue;
493c7ec4f2dSJan Beulich }
494c7ec4f2dSJan Beulich
495e9ce7cb6SWei Liu index = pending_index(queue->pending_cons++);
496e9ce7cb6SWei Liu pending_idx = queue->pending_ring[index];
497562abd39SPaul Durrant xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
498562abd39SPaul Durrant gop);
499e3377f36SZoltan Kiss frag_set_pending_idx(&frags[shinfo->nr_frags],
500e3377f36SZoltan Kiss pending_idx);
501c7ec4f2dSJan Beulich ++shinfo->nr_frags;
502c7ec4f2dSJan Beulich ++gop;
503e3377f36SZoltan Kiss }
504e3377f36SZoltan Kiss
505c7ec4f2dSJan Beulich if (shinfo->nr_frags) {
506e3377f36SZoltan Kiss skb_shinfo(skb)->frag_list = nskb;
507c7ec4f2dSJan Beulich nskb = NULL;
508c7ec4f2dSJan Beulich }
509c7ec4f2dSJan Beulich }
510c7ec4f2dSJan Beulich
511c7ec4f2dSJan Beulich if (nskb) {
512534fc31dSRoss Lagerwall /* A frag_list skb was allocated but it is no longer needed
513c7ec4f2dSJan Beulich * because enough slots were converted to copy ops above or some
514c7ec4f2dSJan Beulich * were empty.
515534fc31dSRoss Lagerwall */
516534fc31dSRoss Lagerwall kfree_skb(nskb);
517e3377f36SZoltan Kiss }
5182810e5b9SWei Liu
519ad7f402aSRoss Lagerwall (*copy_ops) = cop - queue->tx_copy_ops;
520ad7f402aSRoss Lagerwall (*map_ops) = gop - queue->tx_map_ops;
521f942dc25SIan Campbell }
522f942dc25SIan Campbell
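/* xenvif_grant_handle_set()/_reset() track the grant handle of each mapped
 * tx slot.  Overwriting an active handle or clearing an invalid one means
 * the internal state is corrupt, so both cases BUG().
 */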
523e9ce7cb6SWei Liu static inline void xenvif_grant_handle_set(struct xenvif_queue *queue,
524f53c3fe8SZoltan Kiss u16 pending_idx,
525f53c3fe8SZoltan Kiss grant_handle_t handle)
526f53c3fe8SZoltan Kiss {
527e9ce7cb6SWei Liu if (unlikely(queue->grant_tx_handle[pending_idx] !=
528f53c3fe8SZoltan Kiss NETBACK_INVALID_HANDLE)) {
529e9ce7cb6SWei Liu netdev_err(queue->vif->dev,
53068946159SJulien Grall "Trying to overwrite active handle! pending_idx: 0x%x\n",
531f53c3fe8SZoltan Kiss pending_idx);
532f53c3fe8SZoltan Kiss BUG();
533f53c3fe8SZoltan Kiss }
534e9ce7cb6SWei Liu queue->grant_tx_handle[pending_idx] = handle;
535f53c3fe8SZoltan Kiss }
536f53c3fe8SZoltan Kiss
537e9ce7cb6SWei Liu static inline void xenvif_grant_handle_reset(struct xenvif_queue *queue,
538f53c3fe8SZoltan Kiss u16 pending_idx)
539f53c3fe8SZoltan Kiss {
540e9ce7cb6SWei Liu if (unlikely(queue->grant_tx_handle[pending_idx] ==
541f53c3fe8SZoltan Kiss NETBACK_INVALID_HANDLE)) {
542e9ce7cb6SWei Liu netdev_err(queue->vif->dev,
54368946159SJulien Grall "Trying to unmap invalid handle! pending_idx: 0x%x\n",
544f53c3fe8SZoltan Kiss pending_idx);
545f53c3fe8SZoltan Kiss BUG();
546f53c3fe8SZoltan Kiss }
547e9ce7cb6SWei Liu queue->grant_tx_handle[pending_idx] = NETBACK_INVALID_HANDLE;
548f53c3fe8SZoltan Kiss }
549f53c3fe8SZoltan Kiss
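/* Check the results of the grant copy and map operations for one skb (and
 * its frag_list).  Successful header copies are released back to the
 * pending ring and successful maps have their grant handles recorded; on
 * error, already mapped frags are unmapped and the failing slots are
 * reported to the frontend.  Returns 0 only if everything succeeded.
 */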
550e9ce7cb6SWei Liu static int xenvif_tx_check_gop(struct xenvif_queue *queue,
551f942dc25SIan Campbell struct sk_buff *skb,
552bdab8275SZoltan Kiss struct gnttab_map_grant_ref **gopp_map,
553bdab8275SZoltan Kiss struct gnttab_copy **gopp_copy)
554f942dc25SIan Campbell {
5559074ce24SZoltan Kiss struct gnttab_map_grant_ref *gop_map = *gopp_map;
556ad7f402aSRoss Lagerwall u16 pending_idx;
5571a998d3eSZoltan Kiss /* This always points to the shinfo of the skb being checked, which
5581a998d3eSZoltan Kiss * could be either the first or the one on the frag_list
5591a998d3eSZoltan Kiss */
560f942dc25SIan Campbell struct skb_shared_info *shinfo = skb_shinfo(skb);
5611a998d3eSZoltan Kiss /* If this is non-NULL, we are currently checking the frag_list skb, and
5621a998d3eSZoltan Kiss * this points to the shinfo of the first one
5631a998d3eSZoltan Kiss */
5641a998d3eSZoltan Kiss struct skb_shared_info *first_shinfo = NULL;
565f942dc25SIan Campbell int nr_frags = shinfo->nr_frags;
5661b860da0SZoltan Kiss const bool sharedslot = nr_frags &&
567ad7f402aSRoss Lagerwall frag_get_pending_idx(&shinfo->frags[0]) ==
568ad7f402aSRoss Lagerwall copy_pending_idx(skb, copy_count(skb) - 1);
5697dfa764eSJuergen Gross int i, err = 0;
570f942dc25SIan Campbell
571ad7f402aSRoss Lagerwall for (i = 0; i < copy_count(skb); i++) {
572ad7f402aSRoss Lagerwall int newerr;
573ad7f402aSRoss Lagerwall
574f942dc25SIan Campbell /* Check status of header. */
575ad7f402aSRoss Lagerwall pending_idx = copy_pending_idx(skb, i);
576ad7f402aSRoss Lagerwall
577ad7f402aSRoss Lagerwall newerr = (*gopp_copy)->status;
57805310f31SJuergen Gross
57905310f31SJuergen Gross /* Split copies need to be handled together. */
58005310f31SJuergen Gross if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
58105310f31SJuergen Gross (*gopp_copy)++;
58205310f31SJuergen Gross if (!newerr)
58305310f31SJuergen Gross newerr = (*gopp_copy)->status;
58405310f31SJuergen Gross }
585ad7f402aSRoss Lagerwall if (likely(!newerr)) {
586ad7f402aSRoss Lagerwall /* The first frag might still have this slot mapped */
587ad7f402aSRoss Lagerwall if (i < copy_count(skb) - 1 || !sharedslot)
588ad7f402aSRoss Lagerwall xenvif_idx_release(queue, pending_idx,
589ad7f402aSRoss Lagerwall XEN_NETIF_RSP_OKAY);
590ad7f402aSRoss Lagerwall } else {
591ad7f402aSRoss Lagerwall err = newerr;
592bdab8275SZoltan Kiss if (net_ratelimit())
593e9ce7cb6SWei Liu netdev_dbg(queue->vif->dev,
59400aefcebSZoltan Kiss "Grant copy of header failed! status: %d pending_idx: %u ref: %u\n",
595bdab8275SZoltan Kiss (*gopp_copy)->status,
596bdab8275SZoltan Kiss pending_idx,
597bdab8275SZoltan Kiss (*gopp_copy)->source.u.ref);
5981b860da0SZoltan Kiss /* The first frag might still have this slot mapped */
599ad7f402aSRoss Lagerwall if (i < copy_count(skb) - 1 || !sharedslot)
6001b860da0SZoltan Kiss xenvif_idx_release(queue, pending_idx,
6011b860da0SZoltan Kiss XEN_NETIF_RSP_ERROR);
602bdab8275SZoltan Kiss }
603d8cfbfc4SZoltan Kiss (*gopp_copy)++;
604ad7f402aSRoss Lagerwall }
605f942dc25SIan Campbell
606e3377f36SZoltan Kiss check_frags:
607bdab8275SZoltan Kiss for (i = 0; i < nr_frags; i++, gop_map++) {
608f942dc25SIan Campbell int j, newerr;
609f942dc25SIan Campbell
610ea066ad1SIan Campbell pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
611f942dc25SIan Campbell
612f942dc25SIan Campbell /* Check error status: if okay then remember grant handle. */
613bdab8275SZoltan Kiss newerr = gop_map->status;
6142810e5b9SWei Liu
615f942dc25SIan Campbell if (likely(!newerr)) {
616e9ce7cb6SWei Liu xenvif_grant_handle_set(queue,
6179074ce24SZoltan Kiss pending_idx,
6189074ce24SZoltan Kiss gop_map->handle);
619f942dc25SIan Campbell /* Had a previous error? Invalidate this fragment. */
6201b860da0SZoltan Kiss if (unlikely(err)) {
621e9ce7cb6SWei Liu xenvif_idx_unmap(queue, pending_idx);
6221b860da0SZoltan Kiss /* If the mapping of the first frag was OK, but
6231b860da0SZoltan Kiss * the header's copy failed, and they are
6241b860da0SZoltan Kiss * sharing a slot, send an error
6251b860da0SZoltan Kiss */
6263ede7f84SJan Beulich if (i == 0 && !first_shinfo && sharedslot)
6271b860da0SZoltan Kiss xenvif_idx_release(queue, pending_idx,
6281b860da0SZoltan Kiss XEN_NETIF_RSP_ERROR);
6291b860da0SZoltan Kiss else
6301b860da0SZoltan Kiss xenvif_idx_release(queue, pending_idx,
6311b860da0SZoltan Kiss XEN_NETIF_RSP_OKAY);
6321b860da0SZoltan Kiss }
633f942dc25SIan Campbell continue;
634f942dc25SIan Campbell }
635f942dc25SIan Campbell
636f942dc25SIan Campbell /* Error on this fragment: respond to client with an error. */
637bdab8275SZoltan Kiss if (net_ratelimit())
638e9ce7cb6SWei Liu netdev_dbg(queue->vif->dev,
63900aefcebSZoltan Kiss "Grant map of %d. frag failed! status: %d pending_idx: %u ref: %u\n",
640bdab8275SZoltan Kiss i,
641bdab8275SZoltan Kiss gop_map->status,
642bdab8275SZoltan Kiss pending_idx,
643bdab8275SZoltan Kiss gop_map->ref);
6441b860da0SZoltan Kiss
645e9ce7cb6SWei Liu xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
646f942dc25SIan Campbell
647f942dc25SIan Campbell /* Not the first error? Preceding frags already invalidated. */
648f942dc25SIan Campbell if (err)
649f942dc25SIan Campbell continue;
6501b860da0SZoltan Kiss
6511b860da0SZoltan Kiss /* Invalidate preceding fragments of this skb. */
652bdab8275SZoltan Kiss for (j = 0; j < i; j++) {
6535ccb3ea7SJan Beulich pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
654e9ce7cb6SWei Liu xenvif_idx_unmap(queue, pending_idx);
6551b860da0SZoltan Kiss xenvif_idx_release(queue, pending_idx,
6561b860da0SZoltan Kiss XEN_NETIF_RSP_OKAY);
657f942dc25SIan Campbell }
658f942dc25SIan Campbell
6591a998d3eSZoltan Kiss /* And if we found the error while checking the frag_list, unmap
6601a998d3eSZoltan Kiss * the first skb's frags
6611a998d3eSZoltan Kiss */
6621a998d3eSZoltan Kiss if (first_shinfo) {
6631a998d3eSZoltan Kiss for (j = 0; j < first_shinfo->nr_frags; j++) {
6641a998d3eSZoltan Kiss pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
6651a998d3eSZoltan Kiss xenvif_idx_unmap(queue, pending_idx);
6661b860da0SZoltan Kiss xenvif_idx_release(queue, pending_idx,
6671b860da0SZoltan Kiss XEN_NETIF_RSP_OKAY);
6681a998d3eSZoltan Kiss }
669f942dc25SIan Campbell }
670f942dc25SIan Campbell
671f942dc25SIan Campbell /* Remember the error: invalidate all subsequent fragments. */
672f942dc25SIan Campbell err = newerr;
673f942dc25SIan Campbell }
674f942dc25SIan Campbell
6751a998d3eSZoltan Kiss if (skb_has_frag_list(skb) && !first_shinfo) {
676826d8217SJan Beulich first_shinfo = shinfo;
677826d8217SJan Beulich shinfo = skb_shinfo(shinfo->frag_list);
678e3377f36SZoltan Kiss nr_frags = shinfo->nr_frags;
679e3377f36SZoltan Kiss
680e3377f36SZoltan Kiss goto check_frags;
681e3377f36SZoltan Kiss }
682e3377f36SZoltan Kiss
683bdab8275SZoltan Kiss *gopp_map = gop_map;
684f942dc25SIan Campbell return err;
685f942dc25SIan Campbell }
686f942dc25SIan Campbell
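/* Replace the pending_idx placeholders stored in the skb's frags with real
 * page/offset/size descriptors and chain the zerocopy callback contexts so
 * the grants can be unmapped once the stack is done with the skb.
 */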
687e9ce7cb6SWei Liu static void xenvif_fill_frags(struct xenvif_queue *queue, struct sk_buff *skb)
688f942dc25SIan Campbell {
689f942dc25SIan Campbell struct skb_shared_info *shinfo = skb_shinfo(skb);
690f942dc25SIan Campbell int nr_frags = shinfo->nr_frags;
691f942dc25SIan Campbell int i;
692f53c3fe8SZoltan Kiss u16 prev_pending_idx = INVALID_PENDING_IDX;
693f53c3fe8SZoltan Kiss
694f942dc25SIan Campbell for (i = 0; i < nr_frags; i++) {
695f942dc25SIan Campbell skb_frag_t *frag = shinfo->frags + i;
696f942dc25SIan Campbell struct xen_netif_tx_request *txp;
697ea066ad1SIan Campbell struct page *page;
698ea066ad1SIan Campbell u16 pending_idx;
699f942dc25SIan Campbell
700ea066ad1SIan Campbell pending_idx = frag_get_pending_idx(frag);
701f942dc25SIan Campbell
702f53c3fe8SZoltan Kiss /* If this is not the first frag, chain it to the previous */
703bdab8275SZoltan Kiss if (prev_pending_idx == INVALID_PENDING_IDX)
704f53c3fe8SZoltan Kiss skb_shinfo(skb)->destructor_arg =
705e9ce7cb6SWei Liu &callback_param(queue, pending_idx);
706bdab8275SZoltan Kiss else
707e9ce7cb6SWei Liu callback_param(queue, prev_pending_idx).ctx =
708e9ce7cb6SWei Liu &callback_param(queue, pending_idx);
709f53c3fe8SZoltan Kiss
710e9ce7cb6SWei Liu callback_param(queue, pending_idx).ctx = NULL;
711f53c3fe8SZoltan Kiss prev_pending_idx = pending_idx;
712f53c3fe8SZoltan Kiss
713e9ce7cb6SWei Liu txp = &queue->pending_tx_info[pending_idx].req;
714e36bfc0bSLinus Walleij page = virt_to_page((void *)idx_to_kaddr(queue, pending_idx));
715ea066ad1SIan Campbell __skb_fill_page_desc(skb, i, page, txp->offset, txp->size);
716f942dc25SIan Campbell skb->len += txp->size;
717f942dc25SIan Campbell skb->data_len += txp->size;
718f942dc25SIan Campbell skb->truesize += txp->size;
719f942dc25SIan Campbell
720f53c3fe8SZoltan Kiss /* Take an extra reference to offset network stack's put_page */
721e9ce7cb6SWei Liu get_page(queue->mmap_pages[pending_idx]);
722f942dc25SIan Campbell }
723f942dc25SIan Campbell }
724f942dc25SIan Campbell
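/* Consume the XEN_NETIF_EXTRA_* records following a tx request, storing
 * them in 'extras' indexed by type.  Returns the remaining work_to_do
 * budget, or a negative error for a malformed record.
 */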
725e9ce7cb6SWei Liu static int xenvif_get_extras(struct xenvif_queue *queue,
726f942dc25SIan Campbell struct xen_netif_extra_info *extras,
727562abd39SPaul Durrant unsigned int *extra_count,
728f942dc25SIan Campbell int work_to_do)
729f942dc25SIan Campbell {
730f942dc25SIan Campbell struct xen_netif_extra_info extra;
731e9ce7cb6SWei Liu RING_IDX cons = queue->tx.req_cons;
732f942dc25SIan Campbell
733f942dc25SIan Campbell do {
734f942dc25SIan Campbell if (unlikely(work_to_do-- <= 0)) {
735e9ce7cb6SWei Liu netdev_err(queue->vif->dev, "Missing extra info\n");
736e9ce7cb6SWei Liu xenvif_fatal_tx_err(queue->vif);
737f942dc25SIan Campbell return -EBADR;
738f942dc25SIan Campbell }
739f942dc25SIan Campbell
74068a33bfdSDavid Vrabel RING_COPY_REQUEST(&queue->tx, cons, &extra);
741562abd39SPaul Durrant
742562abd39SPaul Durrant queue->tx.req_cons = ++cons;
743562abd39SPaul Durrant (*extra_count)++;
744562abd39SPaul Durrant
745f942dc25SIan Campbell if (unlikely(!extra.type ||
746f942dc25SIan Campbell extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
747e9ce7cb6SWei Liu netdev_err(queue->vif->dev,
748f942dc25SIan Campbell "Invalid extra type: %d\n", extra.type);
749e9ce7cb6SWei Liu xenvif_fatal_tx_err(queue->vif);
750f942dc25SIan Campbell return -EINVAL;
751f942dc25SIan Campbell }
752f942dc25SIan Campbell
753f942dc25SIan Campbell memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
754f942dc25SIan Campbell } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
755f942dc25SIan Campbell
756f942dc25SIan Campbell return work_to_do;
757f942dc25SIan Campbell }
758f942dc25SIan Campbell
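/* Translate a GSO extra-info record from the frontend into the skb's
 * gso_type and gso_size.  A zero size or unknown type is treated as a
 * fatal frontend error.
 */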
7597376419aSWei Liu static int xenvif_set_skb_gso(struct xenvif *vif,
760f942dc25SIan Campbell struct sk_buff *skb,
761f942dc25SIan Campbell struct xen_netif_extra_info *gso)
762f942dc25SIan Campbell {
763f942dc25SIan Campbell if (!gso->u.gso.size) {
76448856286SIan Campbell netdev_err(vif->dev, "GSO size must not be zero.\n");
7657376419aSWei Liu xenvif_fatal_tx_err(vif);
766f942dc25SIan Campbell return -EINVAL;
767f942dc25SIan Campbell }
768f942dc25SIan Campbell
769a9468587SPaul Durrant switch (gso->u.gso.type) {
770a9468587SPaul Durrant case XEN_NETIF_GSO_TYPE_TCPV4:
771a9468587SPaul Durrant skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
772a9468587SPaul Durrant break;
773a9468587SPaul Durrant case XEN_NETIF_GSO_TYPE_TCPV6:
774a9468587SPaul Durrant skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
775a9468587SPaul Durrant break;
776a9468587SPaul Durrant default:
77748856286SIan Campbell netdev_err(vif->dev, "Bad GSO type %d.\n", gso->u.gso.type);
7787376419aSWei Liu xenvif_fatal_tx_err(vif);
779f942dc25SIan Campbell return -EINVAL;
780f942dc25SIan Campbell }
781f942dc25SIan Campbell
782f942dc25SIan Campbell skb_shinfo(skb)->gso_size = gso->u.gso.size;
783b89587a7SPaul Durrant /* gso_segs will be calculated later */
784f942dc25SIan Campbell
785f942dc25SIan Campbell return 0;
786f942dc25SIan Campbell }
787f942dc25SIan Campbell
788e9ce7cb6SWei Liu static int checksum_setup(struct xenvif_queue *queue, struct sk_buff *skb)
789f942dc25SIan Campbell {
7902721637cSPaul Durrant bool recalculate_partial_csum = false;
791f942dc25SIan Campbell
7922eba61d5SPaul Durrant /* A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
793f942dc25SIan Campbell * peers can fail to set NETRXF_csum_blank when sending a GSO
794f942dc25SIan Campbell * frame. In this case force the SKB to CHECKSUM_PARTIAL and
795f942dc25SIan Campbell * recalculate the partial checksum.
796f942dc25SIan Campbell */
797f942dc25SIan Campbell if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
798e9ce7cb6SWei Liu queue->stats.rx_gso_checksum_fixup++;
799f942dc25SIan Campbell skb->ip_summed = CHECKSUM_PARTIAL;
8002721637cSPaul Durrant recalculate_partial_csum = true;
801f942dc25SIan Campbell }
802f942dc25SIan Campbell
803f942dc25SIan Campbell /* A non-CHECKSUM_PARTIAL SKB does not require setup. */
804f942dc25SIan Campbell if (skb->ip_summed != CHECKSUM_PARTIAL)
805f942dc25SIan Campbell return 0;
806f942dc25SIan Campbell
8072721637cSPaul Durrant return skb_checksum_setup(skb, recalculate_partial_csum);
808f942dc25SIan Campbell }
809f942dc25SIan Campbell
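/* Rate limiting: return true if sending 'size' bytes now would exceed the
 * queue's remaining credit.  Credit is replenished once the current credit
 * window has passed; otherwise the credit timer is armed so transmission
 * resumes when new credit becomes available.
 */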
810e9ce7cb6SWei Liu static bool tx_credit_exceeded(struct xenvif_queue *queue, unsigned size)
811f942dc25SIan Campbell {
812059dfa6aSWei Liu u64 now = get_jiffies_64();
813e9ce7cb6SWei Liu u64 next_credit = queue->credit_window_start +
814e9ce7cb6SWei Liu msecs_to_jiffies(queue->credit_usec / 1000);
815f942dc25SIan Campbell
816f942dc25SIan Campbell /* Timer could already be pending in rare cases. */
817dfa523aeSWei Liu if (timer_pending(&queue->credit_timeout)) {
818dfa523aeSWei Liu queue->rate_limited = true;
819f942dc25SIan Campbell return true;
820dfa523aeSWei Liu }
821f942dc25SIan Campbell
822f942dc25SIan Campbell /* Passed the point where we can replenish credit? */
823059dfa6aSWei Liu if (time_after_eq64(now, next_credit)) {
824e9ce7cb6SWei Liu queue->credit_window_start = now;
825e9ce7cb6SWei Liu tx_add_credit(queue);
826f942dc25SIan Campbell }
827f942dc25SIan Campbell
828f942dc25SIan Campbell /* Still too big to send right now? Set a callback. */
829e9ce7cb6SWei Liu if (size > queue->remaining_credit) {
830e9ce7cb6SWei Liu mod_timer(&queue->credit_timeout,
831f942dc25SIan Campbell next_credit);
832e9ce7cb6SWei Liu queue->credit_window_start = next_credit;
833dfa523aeSWei Liu queue->rate_limited = true;
834f942dc25SIan Campbell
835f942dc25SIan Campbell return true;
836f942dc25SIan Campbell }
837f942dc25SIan Campbell
838f942dc25SIan Campbell return false;
839f942dc25SIan Campbell }
840f942dc25SIan Campbell
841210c34dcSPaul Durrant /* No locking is required in xenvif_mcast_add/del() as they are
842210c34dcSPaul Durrant * only ever invoked from NAPI poll. An RCU list is used because
843210c34dcSPaul Durrant * xenvif_mcast_match() is called asynchronously, during start_xmit.
844210c34dcSPaul Durrant */
845210c34dcSPaul Durrant
846210c34dcSPaul Durrant static int xenvif_mcast_add(struct xenvif *vif, const u8 *addr)
847210c34dcSPaul Durrant {
848210c34dcSPaul Durrant struct xenvif_mcast_addr *mcast;
849210c34dcSPaul Durrant
850210c34dcSPaul Durrant if (vif->fe_mcast_count == XEN_NETBK_MCAST_MAX) {
851210c34dcSPaul Durrant if (net_ratelimit())
852210c34dcSPaul Durrant netdev_err(vif->dev,
853210c34dcSPaul Durrant "Too many multicast addresses\n");
854210c34dcSPaul Durrant return -ENOSPC;
855210c34dcSPaul Durrant }
856210c34dcSPaul Durrant
857210c34dcSPaul Durrant mcast = kzalloc(sizeof(*mcast), GFP_ATOMIC);
858210c34dcSPaul Durrant if (!mcast)
859210c34dcSPaul Durrant return -ENOMEM;
860210c34dcSPaul Durrant
861210c34dcSPaul Durrant ether_addr_copy(mcast->addr, addr);
862210c34dcSPaul Durrant list_add_tail_rcu(&mcast->entry, &vif->fe_mcast_addr);
863210c34dcSPaul Durrant vif->fe_mcast_count++;
864210c34dcSPaul Durrant
865210c34dcSPaul Durrant return 0;
866210c34dcSPaul Durrant }
867210c34dcSPaul Durrant
868210c34dcSPaul Durrant static void xenvif_mcast_del(struct xenvif *vif, const u8 *addr)
869210c34dcSPaul Durrant {
870210c34dcSPaul Durrant struct xenvif_mcast_addr *mcast;
871210c34dcSPaul Durrant
872210c34dcSPaul Durrant list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
873210c34dcSPaul Durrant if (ether_addr_equal(addr, mcast->addr)) {
874210c34dcSPaul Durrant --vif->fe_mcast_count;
875210c34dcSPaul Durrant list_del_rcu(&mcast->entry);
876210c34dcSPaul Durrant kfree_rcu(mcast, rcu);
877210c34dcSPaul Durrant break;
878210c34dcSPaul Durrant }
879210c34dcSPaul Durrant }
880210c34dcSPaul Durrant }
881210c34dcSPaul Durrant
882210c34dcSPaul Durrant bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr)
883210c34dcSPaul Durrant {
884210c34dcSPaul Durrant struct xenvif_mcast_addr *mcast;
885210c34dcSPaul Durrant
886210c34dcSPaul Durrant rcu_read_lock();
887210c34dcSPaul Durrant list_for_each_entry_rcu(mcast, &vif->fe_mcast_addr, entry) {
888210c34dcSPaul Durrant if (ether_addr_equal(addr, mcast->addr)) {
889210c34dcSPaul Durrant rcu_read_unlock();
890210c34dcSPaul Durrant return true;
891210c34dcSPaul Durrant }
892210c34dcSPaul Durrant }
893210c34dcSPaul Durrant rcu_read_unlock();
894210c34dcSPaul Durrant
895210c34dcSPaul Durrant return false;
896210c34dcSPaul Durrant }
897210c34dcSPaul Durrant
898210c34dcSPaul Durrant void xenvif_mcast_addr_list_free(struct xenvif *vif)
899210c34dcSPaul Durrant {
900210c34dcSPaul Durrant /* No need for locking or RCU here. NAPI poll and TX queue
901210c34dcSPaul Durrant * are stopped.
902210c34dcSPaul Durrant */
903210c34dcSPaul Durrant while (!list_empty(&vif->fe_mcast_addr)) {
904210c34dcSPaul Durrant struct xenvif_mcast_addr *mcast;
905210c34dcSPaul Durrant
906210c34dcSPaul Durrant mcast = list_first_entry(&vif->fe_mcast_addr,
907210c34dcSPaul Durrant struct xenvif_mcast_addr,
908210c34dcSPaul Durrant entry);
909210c34dcSPaul Durrant --vif->fe_mcast_count;
910210c34dcSPaul Durrant list_del(&mcast->entry);
911210c34dcSPaul Durrant kfree(mcast);
912210c34dcSPaul Durrant }
913210c34dcSPaul Durrant }
914210c34dcSPaul Durrant
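/* Main tx processing loop: consume requests from the shared ring (subject
 * to credit-based rate limiting and sanity checks), handle extra-info
 * records, allocate skbs and queue the grant copy/map operations for up to
 * 'budget' packets.  The accumulated operations are issued by the caller.
 */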
915e9ce7cb6SWei Liu static void xenvif_tx_build_gops(struct xenvif_queue *queue,
916bdab8275SZoltan Kiss int budget,
917bdab8275SZoltan Kiss unsigned *copy_ops,
918bdab8275SZoltan Kiss unsigned *map_ops)
919f942dc25SIan Campbell {
9202475b225SRoss Lagerwall struct sk_buff *skb, *nskb;
921f942dc25SIan Campbell int ret;
9222475b225SRoss Lagerwall unsigned int frag_overflow;
923f942dc25SIan Campbell
924e9ce7cb6SWei Liu while (skb_queue_len(&queue->tx_queue) < budget) {
925f942dc25SIan Campbell struct xen_netif_tx_request txreq;
92637641494SWei Liu struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
927f942dc25SIan Campbell struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX-1];
928562abd39SPaul Durrant unsigned int extra_count;
929f942dc25SIan Campbell RING_IDX idx;
930f942dc25SIan Campbell int work_to_do;
931f942dc25SIan Campbell unsigned int data_len;
932f942dc25SIan Campbell
933e9ce7cb6SWei Liu if (queue->tx.sring->req_prod - queue->tx.req_cons >
93448856286SIan Campbell XEN_NETIF_TX_RING_SIZE) {
935e9ce7cb6SWei Liu netdev_err(queue->vif->dev,
93648856286SIan Campbell "Impossible number of requests. "
93748856286SIan Campbell "req_prod %d, req_cons %d, size %ld\n",
938e9ce7cb6SWei Liu queue->tx.sring->req_prod, queue->tx.req_cons,
93948856286SIan Campbell XEN_NETIF_TX_RING_SIZE);
940e9ce7cb6SWei Liu xenvif_fatal_tx_err(queue->vif);
941e9d8b2c2SWei Liu break;
94248856286SIan Campbell }
94348856286SIan Campbell
94409e545f7SJuergen Gross work_to_do = XEN_RING_NR_UNCONSUMED_REQUESTS(&queue->tx);
945b3f980bdSWei Liu if (!work_to_do)
946b3f980bdSWei Liu break;
947f942dc25SIan Campbell
948e9ce7cb6SWei Liu idx = queue->tx.req_cons;
949f942dc25SIan Campbell rmb(); /* Ensure that we see the request before we copy it. */
95068a33bfdSDavid Vrabel RING_COPY_REQUEST(&queue->tx, idx, &txreq);
951f942dc25SIan Campbell
952f942dc25SIan Campbell /* Credit-based scheduling. */
953e9ce7cb6SWei Liu if (txreq.size > queue->remaining_credit &&
954e9ce7cb6SWei Liu tx_credit_exceeded(queue, txreq.size))
955b3f980bdSWei Liu break;
956f942dc25SIan Campbell
957e9ce7cb6SWei Liu queue->remaining_credit -= txreq.size;
958f942dc25SIan Campbell
959f942dc25SIan Campbell work_to_do--;
960e9ce7cb6SWei Liu queue->tx.req_cons = ++idx;
961f942dc25SIan Campbell
962f942dc25SIan Campbell memset(extras, 0, sizeof(extras));
963562abd39SPaul Durrant extra_count = 0;
964f942dc25SIan Campbell if (txreq.flags & XEN_NETTXF_extra_info) {
965e9ce7cb6SWei Liu work_to_do = xenvif_get_extras(queue, extras,
966562abd39SPaul Durrant &extra_count,
967f942dc25SIan Campbell work_to_do);
968e9ce7cb6SWei Liu idx = queue->tx.req_cons;
96948856286SIan Campbell if (unlikely(work_to_do < 0))
970b3f980bdSWei Liu break;
971f942dc25SIan Campbell }
972f942dc25SIan Campbell
973210c34dcSPaul Durrant if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1].type) {
974210c34dcSPaul Durrant struct xen_netif_extra_info *extra;
975210c34dcSPaul Durrant
976210c34dcSPaul Durrant extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_ADD - 1];
977210c34dcSPaul Durrant ret = xenvif_mcast_add(queue->vif, extra->u.mcast.addr);
978210c34dcSPaul Durrant
979562abd39SPaul Durrant make_tx_response(queue, &txreq, extra_count,
980210c34dcSPaul Durrant (ret == 0) ?
981210c34dcSPaul Durrant XEN_NETIF_RSP_OKAY :
982210c34dcSPaul Durrant XEN_NETIF_RSP_ERROR);
983210c34dcSPaul Durrant continue;
984210c34dcSPaul Durrant }
985210c34dcSPaul Durrant
986210c34dcSPaul Durrant if (extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1].type) {
987210c34dcSPaul Durrant struct xen_netif_extra_info *extra;
988210c34dcSPaul Durrant
989210c34dcSPaul Durrant extra = &extras[XEN_NETIF_EXTRA_TYPE_MCAST_DEL - 1];
990210c34dcSPaul Durrant xenvif_mcast_del(queue->vif, extra->u.mcast.addr);
991210c34dcSPaul Durrant
992562abd39SPaul Durrant make_tx_response(queue, &txreq, extra_count,
993562abd39SPaul Durrant XEN_NETIF_RSP_OKAY);
994210c34dcSPaul Durrant continue;
995210c34dcSPaul Durrant }
996210c34dcSPaul Durrant
997ad7f402aSRoss Lagerwall data_len = (txreq.size > XEN_NETBACK_TX_COPY_LEN) ?
998ad7f402aSRoss Lagerwall XEN_NETBACK_TX_COPY_LEN : txreq.size;
999ad7f402aSRoss Lagerwall
1000562abd39SPaul Durrant ret = xenvif_count_requests(queue, &txreq, extra_count,
1001562abd39SPaul Durrant txfrags, work_to_do);
1002ad7f402aSRoss Lagerwall
100348856286SIan Campbell if (unlikely(ret < 0))
1004b3f980bdSWei Liu break;
100548856286SIan Campbell
1006f942dc25SIan Campbell idx += ret;
1007f942dc25SIan Campbell
1008f942dc25SIan Campbell if (unlikely(txreq.size < ETH_HLEN)) {
1009e9ce7cb6SWei Liu netdev_dbg(queue->vif->dev,
1010f942dc25SIan Campbell "Bad packet size: %d\n", txreq.size);
1011562abd39SPaul Durrant xenvif_tx_err(queue, &txreq, extra_count, idx);
1012b3f980bdSWei Liu break;
1013f942dc25SIan Campbell }
1014f942dc25SIan Campbell
1015f942dc25SIan Campbell /* No crossing a page as the payload mustn't fragment. */
1016d0089e8aSJulien Grall if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
10172eca98e5SJuergen Gross netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
10182eca98e5SJuergen Gross txreq.offset, txreq.size);
1019e9ce7cb6SWei Liu xenvif_fatal_tx_err(queue->vif);
1020b3f980bdSWei Liu break;
1021f942dc25SIan Campbell }
1022f942dc25SIan Campbell
1023ad7f402aSRoss Lagerwall if (ret >= XEN_NETBK_LEGACY_SLOTS_MAX - 1 && data_len < txreq.size)
1024ad7f402aSRoss Lagerwall data_len = txreq.size;
1025f942dc25SIan Campbell
1026e3377f36SZoltan Kiss skb = xenvif_alloc_skb(data_len);
1027f942dc25SIan Campbell if (unlikely(skb == NULL)) {
1028e9ce7cb6SWei Liu netdev_dbg(queue->vif->dev,
1029f942dc25SIan Campbell "Can't allocate a skb in start_xmit.\n");
1030562abd39SPaul Durrant xenvif_tx_err(queue, &txreq, extra_count, idx);
1031f942dc25SIan Campbell break;
1032f942dc25SIan Campbell }
1033f942dc25SIan Campbell
10342475b225SRoss Lagerwall skb_shinfo(skb)->nr_frags = ret;
10352475b225SRoss Lagerwall /* At this point shinfo->nr_frags is in fact the number of
10362475b225SRoss Lagerwall * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
10372475b225SRoss Lagerwall */
10382475b225SRoss Lagerwall frag_overflow = 0;
10392475b225SRoss Lagerwall nskb = NULL;
10402475b225SRoss Lagerwall if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
10412475b225SRoss Lagerwall frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
10422475b225SRoss Lagerwall BUG_ON(frag_overflow > MAX_SKB_FRAGS);
10432475b225SRoss Lagerwall skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
10442475b225SRoss Lagerwall nskb = xenvif_alloc_skb(0);
10452475b225SRoss Lagerwall if (unlikely(nskb == NULL)) {
10463a0233ddSRoss Lagerwall skb_shinfo(skb)->nr_frags = 0;
10472475b225SRoss Lagerwall kfree_skb(skb);
1048562abd39SPaul Durrant xenvif_tx_err(queue, &txreq, extra_count, idx);
10492475b225SRoss Lagerwall if (net_ratelimit())
10502475b225SRoss Lagerwall netdev_err(queue->vif->dev,
10512475b225SRoss Lagerwall "Can't allocate the frag_list skb.\n");
10522475b225SRoss Lagerwall break;
10532475b225SRoss Lagerwall }
10542475b225SRoss Lagerwall }
10552475b225SRoss Lagerwall
1056f942dc25SIan Campbell if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1057f942dc25SIan Campbell struct xen_netif_extra_info *gso;
1058f942dc25SIan Campbell gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1059f942dc25SIan Campbell
1060e9ce7cb6SWei Liu if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
10617376419aSWei Liu /* Failure in xenvif_set_skb_gso is fatal. */
10623a0233ddSRoss Lagerwall skb_shinfo(skb)->nr_frags = 0;
1063f942dc25SIan Campbell kfree_skb(skb);
10642475b225SRoss Lagerwall kfree_skb(nskb);
1065b3f980bdSWei Liu break;
1066f942dc25SIan Campbell }
1067f942dc25SIan Campbell }
1068f942dc25SIan Campbell
1069c2d09fdeSPaul Durrant if (extras[XEN_NETIF_EXTRA_TYPE_HASH - 1].type) {
1070c2d09fdeSPaul Durrant struct xen_netif_extra_info *extra;
1071c2d09fdeSPaul Durrant enum pkt_hash_types type = PKT_HASH_TYPE_NONE;
1072c2d09fdeSPaul Durrant
1073c2d09fdeSPaul Durrant extra = &extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];
1074c2d09fdeSPaul Durrant
1075c2d09fdeSPaul Durrant switch (extra->u.hash.type) {
1076c2d09fdeSPaul Durrant case _XEN_NETIF_CTRL_HASH_TYPE_IPV4:
1077c2d09fdeSPaul Durrant case _XEN_NETIF_CTRL_HASH_TYPE_IPV6:
1078c2d09fdeSPaul Durrant type = PKT_HASH_TYPE_L3;
1079c2d09fdeSPaul Durrant break;
1080c2d09fdeSPaul Durrant
1081c2d09fdeSPaul Durrant case _XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP:
1082c2d09fdeSPaul Durrant case _XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP:
1083c2d09fdeSPaul Durrant type = PKT_HASH_TYPE_L4;
1084c2d09fdeSPaul Durrant break;
1085c2d09fdeSPaul Durrant
1086c2d09fdeSPaul Durrant default:
1087c2d09fdeSPaul Durrant break;
1088c2d09fdeSPaul Durrant }
1089c2d09fdeSPaul Durrant
1090c2d09fdeSPaul Durrant if (type != PKT_HASH_TYPE_NONE)
1091c2d09fdeSPaul Durrant skb_set_hash(skb,
1092c2d09fdeSPaul Durrant *(u32 *)extra->u.hash.value,
1093c2d09fdeSPaul Durrant type);
1094c2d09fdeSPaul Durrant }
1095c2d09fdeSPaul Durrant
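/* Set up the grant copy operations for the linear data and the grant
 * map operations for the remaining frags of this packet.
 */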
1096ad7f402aSRoss Lagerwall xenvif_get_requests(queue, skb, &txreq, txfrags, copy_ops,
1097ad7f402aSRoss Lagerwall map_ops, frag_overflow, nskb, extra_count,
1098ad7f402aSRoss Lagerwall data_len);
1099f942dc25SIan Campbell
1100e9ce7cb6SWei Liu __skb_queue_tail(&queue->tx_queue, skb);
11011e0b6eacSAnnie Li
1102e9ce7cb6SWei Liu queue->tx.req_cons = idx;
1103f942dc25SIan Campbell }
1104f942dc25SIan Campbell
1105bdab8275SZoltan Kiss return;
1106f942dc25SIan Campbell }
1107f942dc25SIan Campbell
1108e3377f36SZoltan Kiss /* Consolidate skb with a frag_list into a brand new one with local pages on
1109e3377f36SZoltan Kiss * frags. Returns 0, or -ENOMEM if it can't allocate new pages.
1110e3377f36SZoltan Kiss */
1111e9ce7cb6SWei Liu static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *skb)
1112e3377f36SZoltan Kiss {
1113e3377f36SZoltan Kiss unsigned int offset = skb_headlen(skb);
1114e3377f36SZoltan Kiss skb_frag_t frags[MAX_SKB_FRAGS];
111549d9991aSDavid Vrabel int i, f;
1116e3377f36SZoltan Kiss struct ubuf_info *uarg;
1117e3377f36SZoltan Kiss struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
1118e3377f36SZoltan Kiss
1119e9ce7cb6SWei Liu queue->stats.tx_zerocopy_sent += 2;
1120e9ce7cb6SWei Liu queue->stats.tx_frag_overflow++;
1121e3377f36SZoltan Kiss
1122e9ce7cb6SWei Liu xenvif_fill_frags(queue, nskb);
1123e3377f36SZoltan Kiss /* Subtract the frags size; we will correct it later */
1124e3377f36SZoltan Kiss skb->truesize -= skb->data_len;
1125e3377f36SZoltan Kiss skb->len += nskb->len;
1126e3377f36SZoltan Kiss skb->data_len += nskb->len;
1127e3377f36SZoltan Kiss
1128e3377f36SZoltan Kiss /* create a brand new frags array and coalesce there */
1129e3377f36SZoltan Kiss for (i = 0; offset < skb->len; i++) {
1130e3377f36SZoltan Kiss struct page *page;
1131e3377f36SZoltan Kiss unsigned int len;
1132e3377f36SZoltan Kiss
1133e3377f36SZoltan Kiss BUG_ON(i >= MAX_SKB_FRAGS);
113444cc8ed1SZoltan Kiss page = alloc_page(GFP_ATOMIC);
1135e3377f36SZoltan Kiss if (!page) {
1136e3377f36SZoltan Kiss int j;
1137e3377f36SZoltan Kiss skb->truesize += skb->data_len;
1138e3377f36SZoltan Kiss for (j = 0; j < i; j++)
1139d7840976SMatthew Wilcox (Oracle) put_page(skb_frag_page(&frags[j]));
1140e3377f36SZoltan Kiss return -ENOMEM;
1141e3377f36SZoltan Kiss }
1142e3377f36SZoltan Kiss
1143e3377f36SZoltan Kiss if (offset + PAGE_SIZE < skb->len)
1144e3377f36SZoltan Kiss len = PAGE_SIZE;
1145e3377f36SZoltan Kiss else
1146e3377f36SZoltan Kiss len = skb->len - offset;
1147e3377f36SZoltan Kiss if (skb_copy_bits(skb, offset, page_address(page), len))
1148e3377f36SZoltan Kiss BUG();
1149e3377f36SZoltan Kiss
1150e3377f36SZoltan Kiss offset += len;
1151b51f4113SYunsheng Lin skb_frag_fill_page_desc(&frags[i], page, 0, len);
1152e3377f36SZoltan Kiss }
115349d9991aSDavid Vrabel
115449d9991aSDavid Vrabel /* Release all the original (foreign) frags. */
115549d9991aSDavid Vrabel for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
115649d9991aSDavid Vrabel skb_frag_unref(skb, f);
1157e3377f36SZoltan Kiss uarg = skb_shinfo(skb)->destructor_arg;
1158a64bd934SWei Liu /* Increase the inflight counter to offset the decrement in the callback */
1159a64bd934SWei Liu atomic_inc(&queue->inflight_packets);
11607ab4f16fSPavel Begunkov uarg->ops->complete(NULL, uarg, true);
1161e3377f36SZoltan Kiss skb_shinfo(skb)->destructor_arg = NULL;
1162e3377f36SZoltan Kiss
1163b0c21badSDavid Vrabel /* Fill the skb with the new (local) frags. */
1164b0c21badSDavid Vrabel memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t));
1165b0c21badSDavid Vrabel skb_shinfo(skb)->nr_frags = i;
1166b0c21badSDavid Vrabel skb->truesize += i * PAGE_SIZE;
1167e3377f36SZoltan Kiss
1168e3377f36SZoltan Kiss return 0;
1169e3377f36SZoltan Kiss }
1170f942dc25SIan Campbell
1171e9ce7cb6SWei Liu static int xenvif_tx_submit(struct xenvif_queue *queue)
1172b3f980bdSWei Liu {
1173e9ce7cb6SWei Liu struct gnttab_map_grant_ref *gop_map = queue->tx_map_ops;
1174e9ce7cb6SWei Liu struct gnttab_copy *gop_copy = queue->tx_copy_ops;
1175b3f980bdSWei Liu struct sk_buff *skb;
1176b3f980bdSWei Liu int work_done = 0;
1177b3f980bdSWei Liu
1178e9ce7cb6SWei Liu while ((skb = __skb_dequeue(&queue->tx_queue)) != NULL) {
1179f942dc25SIan Campbell struct xen_netif_tx_request *txp;
1180f942dc25SIan Campbell u16 pending_idx;
1181f942dc25SIan Campbell
1182ad7f402aSRoss Lagerwall pending_idx = copy_pending_idx(skb, 0);
1183e9ce7cb6SWei Liu txp = &queue->pending_tx_info[pending_idx].req;
1184f942dc25SIan Campbell
1185f942dc25SIan Campbell /* Check the remap error code. */
1186e9ce7cb6SWei Liu if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
1187b42cc6e4SZoltan Kiss /* If there was an error, xenvif_tx_check_gop is
1188b42cc6e4SZoltan Kiss * expected to release all the frags which were mapped,
1189b42cc6e4SZoltan Kiss * so kfree_skb shouldn't do it again
1190b42cc6e4SZoltan Kiss */
1191f942dc25SIan Campbell skb_shinfo(skb)->nr_frags = 0;
1192b42cc6e4SZoltan Kiss if (skb_has_frag_list(skb)) {
1193b42cc6e4SZoltan Kiss struct sk_buff *nskb =
1194b42cc6e4SZoltan Kiss skb_shinfo(skb)->frag_list;
1195b42cc6e4SZoltan Kiss skb_shinfo(nskb)->nr_frags = 0;
1196b42cc6e4SZoltan Kiss }
1197f942dc25SIan Campbell kfree_skb(skb);
1198f942dc25SIan Campbell continue;
1199f942dc25SIan Campbell }
1200f942dc25SIan Campbell
1201f942dc25SIan Campbell if (txp->flags & XEN_NETTXF_csum_blank)
1202f942dc25SIan Campbell skb->ip_summed = CHECKSUM_PARTIAL;
1203f942dc25SIan Campbell else if (txp->flags & XEN_NETTXF_data_validated)
1204f942dc25SIan Campbell skb->ip_summed = CHECKSUM_UNNECESSARY;
1205f942dc25SIan Campbell
1206e9ce7cb6SWei Liu xenvif_fill_frags(queue, skb);
1207f942dc25SIan Campbell
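/* Packets needing more than MAX_SKB_FRAGS slots carry the overflow in
 * a frag_list; coalesce everything into local pages before handing
 * the skb to the stack.
 */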
1208e3377f36SZoltan Kiss if (unlikely(skb_has_frag_list(skb))) {
120999e87f56SIgor Druzhinin struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
121099e87f56SIgor Druzhinin xenvif_skb_zerocopy_prepare(queue, nskb);
1211e9ce7cb6SWei Liu if (xenvif_handle_frag_list(queue, skb)) {
1212e3377f36SZoltan Kiss if (net_ratelimit())
1213e9ce7cb6SWei Liu netdev_err(queue->vif->dev,
1214e3377f36SZoltan Kiss "Not enough memory to consolidate frag_list!\n");
1215a64bd934SWei Liu xenvif_skb_zerocopy_prepare(queue, skb);
1216e3377f36SZoltan Kiss kfree_skb(skb);
1217e3377f36SZoltan Kiss continue;
1218e3377f36SZoltan Kiss }
121999e87f56SIgor Druzhinin /* Copied all the bits from the frag list -- free it. */
122099e87f56SIgor Druzhinin skb_frag_list_init(skb);
122199e87f56SIgor Druzhinin kfree_skb(nskb);
1222e3377f36SZoltan Kiss }
1223e3377f36SZoltan Kiss
1224e9ce7cb6SWei Liu skb->dev = queue->vif->dev;
1225f942dc25SIan Campbell skb->protocol = eth_type_trans(skb, skb->dev);
1226f9ca8f74SJason Wang skb_reset_network_header(skb);
1227f942dc25SIan Campbell
1228e9ce7cb6SWei Liu if (checksum_setup(queue, skb)) {
1229e9ce7cb6SWei Liu netdev_dbg(queue->vif->dev,
1230f942dc25SIan Campbell "Can't setup checksum in net_tx_action\n");
1231f53c3fe8SZoltan Kiss /* We have to set this flag to trigger the callback */
1232f53c3fe8SZoltan Kiss if (skb_shinfo(skb)->destructor_arg)
1233a64bd934SWei Liu xenvif_skb_zerocopy_prepare(queue, skb);
1234f942dc25SIan Campbell kfree_skb(skb);
1235f942dc25SIan Campbell continue;
1236f942dc25SIan Campbell }
1237f942dc25SIan Campbell
1238d2aa125dSMaxim Mikityanskiy skb_probe_transport_header(skb);
1239f9ca8f74SJason Wang
1240b89587a7SPaul Durrant /* If the packet is GSO then we will have just set up the
1241b89587a7SPaul Durrant * transport header offset in checksum_setup so it's now
1242b89587a7SPaul Durrant * straightforward to calculate gso_segs.
1243b89587a7SPaul Durrant */
1244b89587a7SPaul Durrant if (skb_is_gso(skb)) {
1245d2aa125dSMaxim Mikityanskiy int mss, hdrlen;
1246d2aa125dSMaxim Mikityanskiy
1247d2aa125dSMaxim Mikityanskiy /* GSO implies having the L4 header. */
1248d2aa125dSMaxim Mikityanskiy WARN_ON_ONCE(!skb_transport_header_was_set(skb));
1249d2aa125dSMaxim Mikityanskiy if (unlikely(!skb_transport_header_was_set(skb))) {
1250d2aa125dSMaxim Mikityanskiy kfree_skb(skb);
1251d2aa125dSMaxim Mikityanskiy continue;
1252d2aa125dSMaxim Mikityanskiy }
1253d2aa125dSMaxim Mikityanskiy
1254d2aa125dSMaxim Mikityanskiy mss = skb_shinfo(skb)->gso_size;
1255504148feSEric Dumazet hdrlen = skb_tcp_all_headers(skb);
1256b89587a7SPaul Durrant
1257b89587a7SPaul Durrant skb_shinfo(skb)->gso_segs =
1258b89587a7SPaul Durrant DIV_ROUND_UP(skb->len - hdrlen, mss);
1259b89587a7SPaul Durrant }
1260b89587a7SPaul Durrant
1261e9ce7cb6SWei Liu queue->stats.rx_bytes += skb->len;
1262e9ce7cb6SWei Liu queue->stats.rx_packets++;
1263f942dc25SIan Campbell
1264b3f980bdSWei Liu work_done++;
1265b3f980bdSWei Liu
1266f53c3fe8SZoltan Kiss /* Set this flag right before netif_receive_skb, otherwise
1267f53c3fe8SZoltan Kiss * someone might think this packet already left netback, and
1268f53c3fe8SZoltan Kiss * do a skb_copy_ubufs while we are still in control of the
1269f53c3fe8SZoltan Kiss * skb. E.g. the __pskb_pull_tail earlier can do such a thing.
1270f53c3fe8SZoltan Kiss */
12711bb332afSZoltan Kiss if (skb_shinfo(skb)->destructor_arg) {
1272a64bd934SWei Liu xenvif_skb_zerocopy_prepare(queue, skb);
1273e9ce7cb6SWei Liu queue->stats.tx_zerocopy_sent++;
12741bb332afSZoltan Kiss }
1275f53c3fe8SZoltan Kiss
1276b3f980bdSWei Liu netif_receive_skb(skb);
1277f942dc25SIan Campbell }
1278b3f980bdSWei Liu
1279b3f980bdSWei Liu return work_done;
1280f942dc25SIan Campbell }
1281f942dc25SIan Campbell
12827ab4f16fSPavel Begunkov static void xenvif_zerocopy_callback(struct sk_buff *skb,
12837ab4f16fSPavel Begunkov struct ubuf_info *ubuf_base,
128436177832SJonathan Lemon bool zerocopy_success)
12853e2234b3SZoltan Kiss {
1286f53c3fe8SZoltan Kiss unsigned long flags;
1287f53c3fe8SZoltan Kiss pending_ring_idx_t index;
1288b63ca3e8SPavel Begunkov struct ubuf_info_msgzc *ubuf = uarg_to_msgzc(ubuf_base);
1289e9ce7cb6SWei Liu struct xenvif_queue *queue = ubuf_to_queue(ubuf);
1290f53c3fe8SZoltan Kiss
1291f53c3fe8SZoltan Kiss /* This is the only place where we grab this lock, to protect callbacks
1292f53c3fe8SZoltan Kiss * from each other.
1293f53c3fe8SZoltan Kiss */
1294e9ce7cb6SWei Liu spin_lock_irqsave(&queue->callback_lock, flags);
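/* Walk the chain of ubuf_info structures for this skb, queueing each
 * pending index on the dealloc ring.
 */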
1295f53c3fe8SZoltan Kiss do {
1296f53c3fe8SZoltan Kiss u16 pending_idx = ubuf->desc;
1297b63ca3e8SPavel Begunkov ubuf = (struct ubuf_info_msgzc *) ubuf->ctx;
1298e9ce7cb6SWei Liu BUG_ON(queue->dealloc_prod - queue->dealloc_cons >=
1299f53c3fe8SZoltan Kiss MAX_PENDING_REQS);
1300e9ce7cb6SWei Liu index = pending_index(queue->dealloc_prod);
1301e9ce7cb6SWei Liu queue->dealloc_ring[index] = pending_idx;
1302f53c3fe8SZoltan Kiss /* Sync with xenvif_tx_dealloc_action:
1303f53c3fe8SZoltan Kiss * insert idx then incr producer.
1304f53c3fe8SZoltan Kiss */
1305f53c3fe8SZoltan Kiss smp_wmb();
1306e9ce7cb6SWei Liu queue->dealloc_prod++;
1307f53c3fe8SZoltan Kiss } while (ubuf);
1308e9ce7cb6SWei Liu spin_unlock_irqrestore(&queue->callback_lock, flags);
1309f53c3fe8SZoltan Kiss
13101bb332afSZoltan Kiss if (likely(zerocopy_success))
1311e9ce7cb6SWei Liu queue->stats.tx_zerocopy_success++;
13121bb332afSZoltan Kiss else
1313e9ce7cb6SWei Liu queue->stats.tx_zerocopy_fail++;
1314a64bd934SWei Liu xenvif_skb_zerocopy_complete(queue);
1315f53c3fe8SZoltan Kiss }
1316f53c3fe8SZoltan Kiss
13177ab4f16fSPavel Begunkov const struct ubuf_info_ops xenvif_ubuf_ops = {
13187ab4f16fSPavel Begunkov .complete = xenvif_zerocopy_callback,
13197ab4f16fSPavel Begunkov };
13207ab4f16fSPavel Begunkov
1321e9ce7cb6SWei Liu static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1322f53c3fe8SZoltan Kiss {
1323f53c3fe8SZoltan Kiss struct gnttab_unmap_grant_ref *gop;
1324f53c3fe8SZoltan Kiss pending_ring_idx_t dc, dp;
1325f53c3fe8SZoltan Kiss u16 pending_idx, pending_idx_release[MAX_PENDING_REQS];
1326f53c3fe8SZoltan Kiss unsigned int i = 0;
1327f53c3fe8SZoltan Kiss
1328e9ce7cb6SWei Liu dc = queue->dealloc_cons;
1329e9ce7cb6SWei Liu gop = queue->tx_unmap_ops;
1330f53c3fe8SZoltan Kiss
1331f53c3fe8SZoltan Kiss /* Free up any grants we have finished using */
1332f53c3fe8SZoltan Kiss do {
1333e9ce7cb6SWei Liu dp = queue->dealloc_prod;
1334f53c3fe8SZoltan Kiss
1335f53c3fe8SZoltan Kiss /* Ensure we see all indices enqueued by all
1336f53c3fe8SZoltan Kiss * xenvif_zerocopy_callback().
1337f53c3fe8SZoltan Kiss */
1338f53c3fe8SZoltan Kiss smp_rmb();
1339f53c3fe8SZoltan Kiss
1340f53c3fe8SZoltan Kiss while (dc != dp) {
134150c2e4ddSDan Carpenter BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1342f53c3fe8SZoltan Kiss pending_idx =
1343e9ce7cb6SWei Liu queue->dealloc_ring[pending_index(dc++)];
1344f53c3fe8SZoltan Kiss
1345e9ce7cb6SWei Liu pending_idx_release[gop - queue->tx_unmap_ops] =
1346f53c3fe8SZoltan Kiss pending_idx;
1347e9ce7cb6SWei Liu queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
1348e9ce7cb6SWei Liu queue->mmap_pages[pending_idx];
1349f53c3fe8SZoltan Kiss gnttab_set_unmap_op(gop,
1350e9ce7cb6SWei Liu idx_to_kaddr(queue, pending_idx),
1351f53c3fe8SZoltan Kiss GNTMAP_host_map,
1352e9ce7cb6SWei Liu queue->grant_tx_handle[pending_idx]);
1353e9ce7cb6SWei Liu xenvif_grant_handle_reset(queue, pending_idx);
1354f53c3fe8SZoltan Kiss ++gop;
1355f53c3fe8SZoltan Kiss }
1356f53c3fe8SZoltan Kiss
1357e9ce7cb6SWei Liu } while (dp != queue->dealloc_prod);
1358f53c3fe8SZoltan Kiss
1359e9ce7cb6SWei Liu queue->dealloc_cons = dc;
1360f53c3fe8SZoltan Kiss
1361e9ce7cb6SWei Liu if (gop - queue->tx_unmap_ops > 0) {
1362f53c3fe8SZoltan Kiss int ret;
1363e9ce7cb6SWei Liu ret = gnttab_unmap_refs(queue->tx_unmap_ops,
1364f53c3fe8SZoltan Kiss NULL,
1365e9ce7cb6SWei Liu queue->pages_to_unmap,
1366e9ce7cb6SWei Liu gop - queue->tx_unmap_ops);
1367f53c3fe8SZoltan Kiss if (ret) {
136868946159SJulien Grall netdev_err(queue->vif->dev, "Unmap fail: nr_ops %tu ret %d\n",
1369e9ce7cb6SWei Liu gop - queue->tx_unmap_ops, ret);
1370e9ce7cb6SWei Liu for (i = 0; i < gop - queue->tx_unmap_ops; ++i) {
1371f53c3fe8SZoltan Kiss if (gop[i].status != GNTST_okay)
1372e9ce7cb6SWei Liu netdev_err(queue->vif->dev,
137368946159SJulien Grall " host_addr: 0x%llx handle: 0x%x status: %d\n",
1374f53c3fe8SZoltan Kiss gop[i].host_addr,
1375f53c3fe8SZoltan Kiss gop[i].handle,
1376f53c3fe8SZoltan Kiss gop[i].status);
1377f53c3fe8SZoltan Kiss }
1378f53c3fe8SZoltan Kiss BUG();
1379f53c3fe8SZoltan Kiss }
1380f53c3fe8SZoltan Kiss }
1381f53c3fe8SZoltan Kiss
1382e9ce7cb6SWei Liu for (i = 0; i < gop - queue->tx_unmap_ops; ++i)
1383e9ce7cb6SWei Liu xenvif_idx_release(queue, pending_idx_release[i],
1384f53c3fe8SZoltan Kiss XEN_NETIF_RSP_OKAY);
1385f53c3fe8SZoltan Kiss }
1386f53c3fe8SZoltan Kiss
13873e2234b3SZoltan Kiss
1388f942dc25SIan Campbell /* Called after netfront has transmitted */
1389e9ce7cb6SWei Liu int xenvif_tx_action(struct xenvif_queue *queue, int budget)
1390f942dc25SIan Campbell {
1391ad7f402aSRoss Lagerwall unsigned nr_mops = 0, nr_cops = 0;
1392f53c3fe8SZoltan Kiss int work_done, ret;
1393f942dc25SIan Campbell
1394e9ce7cb6SWei Liu if (unlikely(!tx_work_todo(queue)))
1395b3f980bdSWei Liu return 0;
1396b3f980bdSWei Liu
1397e9ce7cb6SWei Liu xenvif_tx_build_gops(queue, budget, &nr_cops, &nr_mops);
1398f942dc25SIan Campbell
1399bdab8275SZoltan Kiss if (nr_cops == 0)
1400b3f980bdSWei Liu return 0;
1401c571898fSAndres Lagar-Cavilla
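/* Issue the batched grant copies for the linear data first, then the
 * grant maps for any frags.
 */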
1402e9ce7cb6SWei Liu gnttab_batch_copy(queue->tx_copy_ops, nr_cops);
14032991397dSJan Beulich if (nr_mops != 0) {
1404e9ce7cb6SWei Liu ret = gnttab_map_refs(queue->tx_map_ops,
1405f53c3fe8SZoltan Kiss NULL,
1406e9ce7cb6SWei Liu queue->pages_to_map,
14079074ce24SZoltan Kiss nr_mops);
14082991397dSJan Beulich if (ret) {
14092991397dSJan Beulich unsigned int i;
14102991397dSJan Beulich
14112991397dSJan Beulich netdev_err(queue->vif->dev, "Map fail: nr %u ret %d\n",
14122991397dSJan Beulich nr_mops, ret);
14132991397dSJan Beulich for (i = 0; i < nr_mops; ++i)
14142991397dSJan Beulich WARN_ON_ONCE(queue->tx_map_ops[i].status ==
14152991397dSJan Beulich GNTST_okay);
14162991397dSJan Beulich }
14172991397dSJan Beulich }
1418f942dc25SIan Campbell
1419e9ce7cb6SWei Liu work_done = xenvif_tx_submit(queue);
1420b3f980bdSWei Liu
1421b3f980bdSWei Liu return work_done;
1422f942dc25SIan Campbell }
1423f942dc25SIan Campbell
14247b55984cSJan Beulich static void _make_tx_response(struct xenvif_queue *queue,
14257b55984cSJan Beulich const struct xen_netif_tx_request *txp,
1426562abd39SPaul Durrant unsigned int extra_count,
14277b55984cSJan Beulich s8 status)
1428f942dc25SIan Campbell {
1429e9ce7cb6SWei Liu RING_IDX i = queue->tx.rsp_prod_pvt;
1430f942dc25SIan Campbell struct xen_netif_tx_response *resp;
1431f942dc25SIan Campbell
1432e9ce7cb6SWei Liu resp = RING_GET_RESPONSE(&queue->tx, i);
1433f942dc25SIan Campbell resp->id = txp->id;
14347b55984cSJan Beulich resp->status = status;
1435f942dc25SIan Campbell
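/* Extra info slots consumed along with the request are acknowledged
 * with null responses.
 */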
1436562abd39SPaul Durrant while (extra_count-- != 0)
1437e9ce7cb6SWei Liu RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL;
1438f942dc25SIan Campbell
1439e9ce7cb6SWei Liu queue->tx.rsp_prod_pvt = ++i;
1440f942dc25SIan Campbell }
1441f942dc25SIan Campbell
1442c8a4d299SDavid Vrabel static void push_tx_responses(struct xenvif_queue *queue)
1443c8a4d299SDavid Vrabel {
1444c8a4d299SDavid Vrabel int notify;
1445c8a4d299SDavid Vrabel
1446c8a4d299SDavid Vrabel RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify);
1447c8a4d299SDavid Vrabel if (notify)
1448c8a4d299SDavid Vrabel notify_remote_via_irq(queue->tx_irq);
1449c8a4d299SDavid Vrabel }
1450c8a4d299SDavid Vrabel
14517b55984cSJan Beulich static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
14527b55984cSJan Beulich s8 status)
14537b55984cSJan Beulich {
14547b55984cSJan Beulich struct pending_tx_info *pending_tx_info;
14557b55984cSJan Beulich pending_ring_idx_t index;
14567b55984cSJan Beulich unsigned long flags;
14577b55984cSJan Beulich
14587b55984cSJan Beulich pending_tx_info = &queue->pending_tx_info[pending_idx];
14597b55984cSJan Beulich
14607b55984cSJan Beulich spin_lock_irqsave(&queue->response_lock, flags);
14617b55984cSJan Beulich
14627b55984cSJan Beulich _make_tx_response(queue, &pending_tx_info->req,
14637b55984cSJan Beulich pending_tx_info->extra_count, status);
14647b55984cSJan Beulich
14657b55984cSJan Beulich /* Release the pending index before pushing the Tx response so
14667b55984cSJan Beulich * it's available before a new Tx request is pushed by the
14677b55984cSJan Beulich * frontend.
14687b55984cSJan Beulich */
14697b55984cSJan Beulich index = pending_index(queue->pending_prod++);
14707b55984cSJan Beulich queue->pending_ring[index] = pending_idx;
14717b55984cSJan Beulich
14727b55984cSJan Beulich push_tx_responses(queue);
14737b55984cSJan Beulich
14747b55984cSJan Beulich spin_unlock_irqrestore(&queue->response_lock, flags);
14757b55984cSJan Beulich }
14767b55984cSJan Beulich
14777b55984cSJan Beulich static void make_tx_response(struct xenvif_queue *queue,
14787b55984cSJan Beulich const struct xen_netif_tx_request *txp,
14797b55984cSJan Beulich unsigned int extra_count,
14807b55984cSJan Beulich s8 status)
14817b55984cSJan Beulich {
14827b55984cSJan Beulich unsigned long flags;
14837b55984cSJan Beulich
14847b55984cSJan Beulich spin_lock_irqsave(&queue->response_lock, flags);
14857b55984cSJan Beulich
14867b55984cSJan Beulich _make_tx_response(queue, txp, extra_count, status);
14877b55984cSJan Beulich push_tx_responses(queue);
14887b55984cSJan Beulich
14897b55984cSJan Beulich spin_unlock_irqrestore(&queue->response_lock, flags);
14907b55984cSJan Beulich }
14917b55984cSJan Beulich
14925834e72eSJuergen Gross static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1493f53c3fe8SZoltan Kiss {
1494f53c3fe8SZoltan Kiss int ret;
1495f53c3fe8SZoltan Kiss struct gnttab_unmap_grant_ref tx_unmap_op;
1496f53c3fe8SZoltan Kiss
1497f53c3fe8SZoltan Kiss gnttab_set_unmap_op(&tx_unmap_op,
1498e9ce7cb6SWei Liu idx_to_kaddr(queue, pending_idx),
1499f53c3fe8SZoltan Kiss GNTMAP_host_map,
1500e9ce7cb6SWei Liu queue->grant_tx_handle[pending_idx]);
1501e9ce7cb6SWei Liu xenvif_grant_handle_reset(queue, pending_idx);
1502f53c3fe8SZoltan Kiss
1503f53c3fe8SZoltan Kiss ret = gnttab_unmap_refs(&tx_unmap_op, NULL,
1504e9ce7cb6SWei Liu &queue->mmap_pages[pending_idx], 1);
15057aceb47aSZoltan Kiss if (ret) {
1506e9ce7cb6SWei Liu netdev_err(queue->vif->dev,
150768946159SJulien Grall "Unmap fail: ret: %d pending_idx: %d host_addr: %llx handle: 0x%x status: %d\n",
15087aceb47aSZoltan Kiss ret,
15097aceb47aSZoltan Kiss pending_idx,
15107aceb47aSZoltan Kiss tx_unmap_op.host_addr,
15117aceb47aSZoltan Kiss tx_unmap_op.handle,
15127aceb47aSZoltan Kiss tx_unmap_op.status);
15137aceb47aSZoltan Kiss BUG();
15147aceb47aSZoltan Kiss }
1515f53c3fe8SZoltan Kiss }
1516f53c3fe8SZoltan Kiss
1517e9ce7cb6SWei Liu static inline int tx_work_todo(struct xenvif_queue *queue)
1518f942dc25SIan Campbell {
1519e9ce7cb6SWei Liu if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
1520f942dc25SIan Campbell return 1;
1521f942dc25SIan Campbell
1522f942dc25SIan Campbell return 0;
1523f942dc25SIan Campbell }
1524f942dc25SIan Campbell
1525e9ce7cb6SWei Liu static inline bool tx_dealloc_work_todo(struct xenvif_queue *queue)
1526f53c3fe8SZoltan Kiss {
1527e9ce7cb6SWei Liu return queue->dealloc_cons != queue->dealloc_prod;
1528f53c3fe8SZoltan Kiss }
1529f53c3fe8SZoltan Kiss
15304e15ee2cSPaul Durrant void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue)
1531f942dc25SIan Campbell {
1532e9ce7cb6SWei Liu if (queue->tx.sring)
1533e9ce7cb6SWei Liu xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1534e9ce7cb6SWei Liu queue->tx.sring);
1535e9ce7cb6SWei Liu if (queue->rx.sring)
1536e9ce7cb6SWei Liu xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(queue->vif),
1537e9ce7cb6SWei Liu queue->rx.sring);
1538f942dc25SIan Campbell }
1539f942dc25SIan Campbell
15404e15ee2cSPaul Durrant int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
1541f942dc25SIan Campbell grant_ref_t tx_ring_ref,
1542f942dc25SIan Campbell grant_ref_t rx_ring_ref)
1543f942dc25SIan Campbell {
1544c9d63699SDavid Vrabel void *addr;
1545f942dc25SIan Campbell struct xen_netif_tx_sring *txs;
1546f942dc25SIan Campbell struct xen_netif_rx_sring *rxs;
15479476654bSPaul Durrant RING_IDX rsp_prod, req_prod;
1548bacc8dafSColin Ian King int err;
1549f942dc25SIan Campbell
1550e9ce7cb6SWei Liu err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1551ccc9d90aSWei Liu &tx_ring_ref, 1, &addr);
1552c9d63699SDavid Vrabel if (err)
1553f942dc25SIan Campbell goto err;
1554f942dc25SIan Campbell
1555c9d63699SDavid Vrabel txs = (struct xen_netif_tx_sring *)addr;
15569476654bSPaul Durrant rsp_prod = READ_ONCE(txs->rsp_prod);
15579476654bSPaul Durrant req_prod = READ_ONCE(txs->req_prod);
15589476654bSPaul Durrant
15599476654bSPaul Durrant BACK_RING_ATTACH(&queue->tx, txs, rsp_prod, XEN_PAGE_SIZE);
15609476654bSPaul Durrant
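/* Reject a ring whose published indices claim more outstanding
 * requests than the ring can hold.
 */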
15619476654bSPaul Durrant err = -EIO;
15629476654bSPaul Durrant if (req_prod - rsp_prod > RING_SIZE(&queue->tx))
15639476654bSPaul Durrant goto err;
1564f942dc25SIan Campbell
1565e9ce7cb6SWei Liu err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
1566ccc9d90aSWei Liu &rx_ring_ref, 1, &addr);
1567c9d63699SDavid Vrabel if (err)
1568f942dc25SIan Campbell goto err;
1569f942dc25SIan Campbell
1570c9d63699SDavid Vrabel rxs = (struct xen_netif_rx_sring *)addr;
15719476654bSPaul Durrant rsp_prod = READ_ONCE(rxs->rsp_prod);
15729476654bSPaul Durrant req_prod = READ_ONCE(rxs->req_prod);
15739476654bSPaul Durrant
15749476654bSPaul Durrant BACK_RING_ATTACH(&queue->rx, rxs, rsp_prod, XEN_PAGE_SIZE);
15759476654bSPaul Durrant
15769476654bSPaul Durrant err = -EIO;
15779476654bSPaul Durrant if (req_prod - rsp_prod > RING_SIZE(&queue->rx))
15789476654bSPaul Durrant goto err;
1579f942dc25SIan Campbell
1580f942dc25SIan Campbell return 0;
1581f942dc25SIan Campbell
1582f942dc25SIan Campbell err:
15834e15ee2cSPaul Durrant xenvif_unmap_frontend_data_rings(queue);
1584f942dc25SIan Campbell return err;
1585f942dc25SIan Campbell }
1586f942dc25SIan Campbell
1587a64bd934SWei Liu static bool xenvif_dealloc_kthread_should_stop(struct xenvif_queue *queue)
1588a64bd934SWei Liu {
1589a64bd934SWei Liu /* Dealloc thread must remain running until all inflight
1590a64bd934SWei Liu * packets complete.
1591a64bd934SWei Liu */
1592a64bd934SWei Liu return kthread_should_stop() &&
1593a64bd934SWei Liu !atomic_read(&queue->inflight_packets);
1594a64bd934SWei Liu }
1595a64bd934SWei Liu
1596f53c3fe8SZoltan Kiss int xenvif_dealloc_kthread(void *data)
1597f53c3fe8SZoltan Kiss {
1598e9ce7cb6SWei Liu struct xenvif_queue *queue = data;
1599f53c3fe8SZoltan Kiss
1600a64bd934SWei Liu for (;;) {
1601e9ce7cb6SWei Liu wait_event_interruptible(queue->dealloc_wq,
1602e9ce7cb6SWei Liu tx_dealloc_work_todo(queue) ||
1603a64bd934SWei Liu xenvif_dealloc_kthread_should_stop(queue));
1604a64bd934SWei Liu if (xenvif_dealloc_kthread_should_stop(queue))
1605f53c3fe8SZoltan Kiss break;
1606f53c3fe8SZoltan Kiss
1607e9ce7cb6SWei Liu xenvif_tx_dealloc_action(queue);
1608f53c3fe8SZoltan Kiss cond_resched();
1609f53c3fe8SZoltan Kiss }
1610f53c3fe8SZoltan Kiss
1611f53c3fe8SZoltan Kiss /* Unmap anything remaining */
1612e9ce7cb6SWei Liu if (tx_dealloc_work_todo(queue))
1613e9ce7cb6SWei Liu xenvif_tx_dealloc_action(queue);
1614f53c3fe8SZoltan Kiss
1615f53c3fe8SZoltan Kiss return 0;
1616f53c3fe8SZoltan Kiss }
1617f53c3fe8SZoltan Kiss
16184e15ee2cSPaul Durrant static void make_ctrl_response(struct xenvif *vif,
16194e15ee2cSPaul Durrant const struct xen_netif_ctrl_request *req,
16204e15ee2cSPaul Durrant u32 status, u32 data)
16214e15ee2cSPaul Durrant {
16224e15ee2cSPaul Durrant RING_IDX idx = vif->ctrl.rsp_prod_pvt;
16234e15ee2cSPaul Durrant struct xen_netif_ctrl_response rsp = {
16244e15ee2cSPaul Durrant .id = req->id,
16254e15ee2cSPaul Durrant .type = req->type,
16264e15ee2cSPaul Durrant .status = status,
16274e15ee2cSPaul Durrant .data = data,
16284e15ee2cSPaul Durrant };
16294e15ee2cSPaul Durrant
16304e15ee2cSPaul Durrant *RING_GET_RESPONSE(&vif->ctrl, idx) = rsp;
16314e15ee2cSPaul Durrant vif->ctrl.rsp_prod_pvt = ++idx;
16324e15ee2cSPaul Durrant }
16334e15ee2cSPaul Durrant
16344e15ee2cSPaul Durrant static void push_ctrl_response(struct xenvif *vif)
16354e15ee2cSPaul Durrant {
16364e15ee2cSPaul Durrant int notify;
16374e15ee2cSPaul Durrant
16384e15ee2cSPaul Durrant RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->ctrl, notify);
16394e15ee2cSPaul Durrant if (notify)
16404e15ee2cSPaul Durrant notify_remote_via_irq(vif->ctrl_irq);
16414e15ee2cSPaul Durrant }
16424e15ee2cSPaul Durrant
16434e15ee2cSPaul Durrant static void process_ctrl_request(struct xenvif *vif,
16444e15ee2cSPaul Durrant const struct xen_netif_ctrl_request *req)
16454e15ee2cSPaul Durrant {
164640d8abdeSPaul Durrant u32 status = XEN_NETIF_CTRL_STATUS_NOT_SUPPORTED;
164740d8abdeSPaul Durrant u32 data = 0;
164840d8abdeSPaul Durrant
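/* Dispatch hash configuration requests; any other request type is
 * reported as not supported.
 */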
164940d8abdeSPaul Durrant switch (req->type) {
165040d8abdeSPaul Durrant case XEN_NETIF_CTRL_TYPE_SET_HASH_ALGORITHM:
165140d8abdeSPaul Durrant status = xenvif_set_hash_alg(vif, req->data[0]);
165240d8abdeSPaul Durrant break;
165340d8abdeSPaul Durrant
165440d8abdeSPaul Durrant case XEN_NETIF_CTRL_TYPE_GET_HASH_FLAGS:
165540d8abdeSPaul Durrant status = xenvif_get_hash_flags(vif, &data);
165640d8abdeSPaul Durrant break;
165740d8abdeSPaul Durrant
165840d8abdeSPaul Durrant case XEN_NETIF_CTRL_TYPE_SET_HASH_FLAGS:
165940d8abdeSPaul Durrant status = xenvif_set_hash_flags(vif, req->data[0]);
166040d8abdeSPaul Durrant break;
166140d8abdeSPaul Durrant
166240d8abdeSPaul Durrant case XEN_NETIF_CTRL_TYPE_SET_HASH_KEY:
166340d8abdeSPaul Durrant status = xenvif_set_hash_key(vif, req->data[0],
166440d8abdeSPaul Durrant req->data[1]);
166540d8abdeSPaul Durrant break;
166640d8abdeSPaul Durrant
166740d8abdeSPaul Durrant case XEN_NETIF_CTRL_TYPE_GET_HASH_MAPPING_SIZE:
166840d8abdeSPaul Durrant status = XEN_NETIF_CTRL_STATUS_SUCCESS;
166940d8abdeSPaul Durrant data = XEN_NETBK_MAX_HASH_MAPPING_SIZE;
167040d8abdeSPaul Durrant break;
167140d8abdeSPaul Durrant
167240d8abdeSPaul Durrant case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING_SIZE:
167340d8abdeSPaul Durrant status = xenvif_set_hash_mapping_size(vif,
167440d8abdeSPaul Durrant req->data[0]);
167540d8abdeSPaul Durrant break;
167640d8abdeSPaul Durrant
167740d8abdeSPaul Durrant case XEN_NETIF_CTRL_TYPE_SET_HASH_MAPPING:
167840d8abdeSPaul Durrant status = xenvif_set_hash_mapping(vif, req->data[0],
167940d8abdeSPaul Durrant req->data[1],
168040d8abdeSPaul Durrant req->data[2]);
168140d8abdeSPaul Durrant break;
168240d8abdeSPaul Durrant
168340d8abdeSPaul Durrant default:
168440d8abdeSPaul Durrant break;
168540d8abdeSPaul Durrant }
168640d8abdeSPaul Durrant
168740d8abdeSPaul Durrant make_ctrl_response(vif, req, status, data);
16884e15ee2cSPaul Durrant push_ctrl_response(vif);
16894e15ee2cSPaul Durrant }
16904e15ee2cSPaul Durrant
16914e15ee2cSPaul Durrant static void xenvif_ctrl_action(struct xenvif *vif)
16924e15ee2cSPaul Durrant {
16934e15ee2cSPaul Durrant for (;;) {
16944e15ee2cSPaul Durrant RING_IDX req_prod, req_cons;
16954e15ee2cSPaul Durrant
16964e15ee2cSPaul Durrant req_prod = vif->ctrl.sring->req_prod;
16974e15ee2cSPaul Durrant req_cons = vif->ctrl.req_cons;
16984e15ee2cSPaul Durrant
16994e15ee2cSPaul Durrant /* Make sure we can see requests before we process them. */
17004e15ee2cSPaul Durrant rmb();
17014e15ee2cSPaul Durrant
17024e15ee2cSPaul Durrant if (req_cons == req_prod)
17034e15ee2cSPaul Durrant break;
17044e15ee2cSPaul Durrant
17054e15ee2cSPaul Durrant while (req_cons != req_prod) {
17064e15ee2cSPaul Durrant struct xen_netif_ctrl_request req;
17074e15ee2cSPaul Durrant
17084e15ee2cSPaul Durrant RING_COPY_REQUEST(&vif->ctrl, req_cons, &req);
17094e15ee2cSPaul Durrant req_cons++;
17104e15ee2cSPaul Durrant
17114e15ee2cSPaul Durrant process_ctrl_request(vif, &req);
17124e15ee2cSPaul Durrant }
17134e15ee2cSPaul Durrant
17144e15ee2cSPaul Durrant vif->ctrl.req_cons = req_cons;
17154e15ee2cSPaul Durrant vif->ctrl.sring->req_event = req_cons + 1;
17164e15ee2cSPaul Durrant }
17174e15ee2cSPaul Durrant }
17184e15ee2cSPaul Durrant
17194e15ee2cSPaul Durrant static bool xenvif_ctrl_work_todo(struct xenvif *vif)
17204e15ee2cSPaul Durrant {
17214e15ee2cSPaul Durrant if (likely(RING_HAS_UNCONSUMED_REQUESTS(&vif->ctrl)))
1722d3e2a25bSGustavo A. R. Silva return true;
17234e15ee2cSPaul Durrant
1724d3e2a25bSGustavo A. R. Silva return false;
17254e15ee2cSPaul Durrant }
17264e15ee2cSPaul Durrant
17270364a882SJuergen Gross irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data)
17284e15ee2cSPaul Durrant {
17294e15ee2cSPaul Durrant struct xenvif *vif = data;
173023025393SJuergen Gross unsigned int eoi_flag = XEN_EOI_FLAG_SPURIOUS;
17314e15ee2cSPaul Durrant
173223025393SJuergen Gross while (xenvif_ctrl_work_todo(vif)) {
17334e15ee2cSPaul Durrant xenvif_ctrl_action(vif);
173423025393SJuergen Gross eoi_flag = 0;
173523025393SJuergen Gross }
173623025393SJuergen Gross
173723025393SJuergen Gross xen_irq_lateeoi(irq, eoi_flag);
17384e15ee2cSPaul Durrant
17390364a882SJuergen Gross return IRQ_HANDLED;
17404e15ee2cSPaul Durrant }
17414e15ee2cSPaul Durrant
1742f942dc25SIan Campbell static int __init netback_init(void)
1743f942dc25SIan Campbell {
1744f942dc25SIan Campbell int rc = 0;
1745f942dc25SIan Campbell
17462a14b244SDaniel De Graaf if (!xen_domain())
1747f942dc25SIan Campbell return -ENODEV;
1748f942dc25SIan Campbell
174956dd5af9SJuergen Gross /* Allow as many queues as there are CPUs, up to a maximum of 8, if the
17504c82ac3cSWei Liu * user has not specified a value.
17514c82ac3cSWei Liu */
17524c82ac3cSWei Liu if (xenvif_max_queues == 0)
175356dd5af9SJuergen Gross xenvif_max_queues = min_t(unsigned int, MAX_QUEUES_DEFAULT,
175456dd5af9SJuergen Gross num_online_cpus());
17558d3d53b3SAndrew J. Bennieston
175637641494SWei Liu if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
1757383eda32SJoe Perches pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
175837641494SWei Liu fatal_skb_slots, XEN_NETBK_LEGACY_SLOTS_MAX);
175937641494SWei Liu fatal_skb_slots = XEN_NETBK_LEGACY_SLOTS_MAX;
17602810e5b9SWei Liu }
17612810e5b9SWei Liu
1762f942dc25SIan Campbell rc = xenvif_xenbus_init();
1763f942dc25SIan Campbell if (rc)
1764f942dc25SIan Campbell goto failed_init;
1765f942dc25SIan Campbell
1766f51de243SZoltan Kiss #ifdef CONFIG_DEBUG_FS
1767f51de243SZoltan Kiss xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
1768f51de243SZoltan Kiss #endif /* CONFIG_DEBUG_FS */
1769f51de243SZoltan Kiss
1770f942dc25SIan Campbell return 0;
1771f942dc25SIan Campbell
1772f942dc25SIan Campbell failed_init:
1773f942dc25SIan Campbell return rc;
1774f942dc25SIan Campbell }
1775f942dc25SIan Campbell
1776f942dc25SIan Campbell module_init(netback_init);
1777f942dc25SIan Campbell
1778b103f358SWei Liu static void __exit netback_fini(void)
1779b103f358SWei Liu {
1780f51de243SZoltan Kiss #ifdef CONFIG_DEBUG_FS
1781f51de243SZoltan Kiss debugfs_remove_recursive(xen_netback_dbg_root);
1782f51de243SZoltan Kiss #endif /* CONFIG_DEBUG_FS */
1783b103f358SWei Liu xenvif_xenbus_fini();
1784b103f358SWei Liu }
1785b103f358SWei Liu module_exit(netback_fini);
1786b103f358SWei Liu
17875b8e3464SBreno Leitao MODULE_DESCRIPTION("Xen backend network device module");
1788f942dc25SIan Campbell MODULE_LICENSE("Dual BSD/GPL");
1789f984cec6SBastian Blank MODULE_ALIAS("xen-backend:vif");
1790