xref: /linux/drivers/net/ethernet/sfc/rx.c (revision 4a74dc65e3ad825a66dfbcb256f98c550f96445b)
1874aeea5SJeff Kirsher /****************************************************************************
2874aeea5SJeff Kirsher  * Driver for Solarflare Solarstorm network controllers and boards
3874aeea5SJeff Kirsher  * Copyright 2005-2006 Fen Systems Ltd.
4874aeea5SJeff Kirsher  * Copyright 2005-2011 Solarflare Communications Inc.
5874aeea5SJeff Kirsher  *
6874aeea5SJeff Kirsher  * This program is free software; you can redistribute it and/or modify it
7874aeea5SJeff Kirsher  * under the terms of the GNU General Public License version 2 as published
8874aeea5SJeff Kirsher  * by the Free Software Foundation, incorporated herein by reference.
9874aeea5SJeff Kirsher  */
10874aeea5SJeff Kirsher 
11874aeea5SJeff Kirsher #include <linux/socket.h>
12874aeea5SJeff Kirsher #include <linux/in.h>
13874aeea5SJeff Kirsher #include <linux/slab.h>
14874aeea5SJeff Kirsher #include <linux/ip.h>
15874aeea5SJeff Kirsher #include <linux/tcp.h>
16874aeea5SJeff Kirsher #include <linux/udp.h>
17874aeea5SJeff Kirsher #include <linux/prefetch.h>
186eb07cafSPaul Gortmaker #include <linux/moduleparam.h>
19874aeea5SJeff Kirsher #include <net/ip.h>
20874aeea5SJeff Kirsher #include <net/checksum.h>
21874aeea5SJeff Kirsher #include "net_driver.h"
22874aeea5SJeff Kirsher #include "efx.h"
23874aeea5SJeff Kirsher #include "nic.h"
24874aeea5SJeff Kirsher #include "selftest.h"
25874aeea5SJeff Kirsher #include "workarounds.h"
26874aeea5SJeff Kirsher 
27874aeea5SJeff Kirsher /* Number of RX descriptors pushed at once. */
28874aeea5SJeff Kirsher #define EFX_RX_BATCH  8
29874aeea5SJeff Kirsher 
30874aeea5SJeff Kirsher /* Maximum size of a buffer sharing a page */
31874aeea5SJeff Kirsher #define EFX_RX_HALF_PAGE ((PAGE_SIZE >> 1) - sizeof(struct efx_rx_page_state))
32874aeea5SJeff Kirsher 
33874aeea5SJeff Kirsher /* Size of buffer allocated for skb header area. */
34874aeea5SJeff Kirsher #define EFX_SKB_HEADERS  64u
35874aeea5SJeff Kirsher 
36874aeea5SJeff Kirsher /*
37874aeea5SJeff Kirsher  * rx_alloc_method - RX buffer allocation method
38874aeea5SJeff Kirsher  *
39874aeea5SJeff Kirsher  * This driver supports two methods for allocating and using RX buffers:
40874aeea5SJeff Kirsher  * each RX buffer may be backed by an skb or by an order-n page.
41874aeea5SJeff Kirsher  *
42874aeea5SJeff Kirsher  * When GRO is in use then the second method has a lower overhead,
43874aeea5SJeff Kirsher  * since we don't have to allocate then free skbs on reassembled frames.
44874aeea5SJeff Kirsher  *
45874aeea5SJeff Kirsher  * Values:
46874aeea5SJeff Kirsher  *   - RX_ALLOC_METHOD_AUTO = 0
47874aeea5SJeff Kirsher  *   - RX_ALLOC_METHOD_SKB  = 1
48874aeea5SJeff Kirsher  *   - RX_ALLOC_METHOD_PAGE = 2
49874aeea5SJeff Kirsher  *
50874aeea5SJeff Kirsher  * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count
51874aeea5SJeff Kirsher  * controlled by the parameters below.
52874aeea5SJeff Kirsher  *
53874aeea5SJeff Kirsher  *   - Pushing and popping descriptors can be separated by up to the
54874aeea5SJeff Kirsher  *     rx_queue size, so the watermarks should be ~rxd_size.
55874aeea5SJeff Kirsher  *   - The performance win from using page-based allocation for GRO is
56874aeea5SJeff Kirsher  *     less than the performance hit of using page-based allocation for
57874aeea5SJeff Kirsher  *     non-GRO traffic, so the watermarks should reflect this.
58874aeea5SJeff Kirsher  *
59874aeea5SJeff Kirsher  * Per channel we maintain a single variable, updated by each channel:
60874aeea5SJeff Kirsher  *
61874aeea5SJeff Kirsher  *   rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
62874aeea5SJeff Kirsher  *                      RX_ALLOC_FACTOR_SKB)
63874aeea5SJeff Kirsher  * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
64874aeea5SJeff Kirsher  * limits the hysteresis), and update the allocation strategy:
65874aeea5SJeff Kirsher  *
66874aeea5SJeff Kirsher  *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
67874aeea5SJeff Kirsher  *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
68874aeea5SJeff Kirsher  */
69874aeea5SJeff Kirsher static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
70874aeea5SJeff Kirsher 
71874aeea5SJeff Kirsher #define RX_ALLOC_LEVEL_GRO 0x2000
72874aeea5SJeff Kirsher #define RX_ALLOC_LEVEL_MAX 0x3000
73874aeea5SJeff Kirsher #define RX_ALLOC_FACTOR_GRO 1
74874aeea5SJeff Kirsher #define RX_ALLOC_FACTOR_SKB (-2)
75874aeea5SJeff Kirsher 
76874aeea5SJeff Kirsher /* This is the percentage fill level below which new RX descriptors
77874aeea5SJeff Kirsher  * will be added to the RX descriptor ring.
78874aeea5SJeff Kirsher  */
7964235187SDavid Riddoch static unsigned int rx_refill_threshold;
80874aeea5SJeff Kirsher 
81874aeea5SJeff Kirsher /*
82874aeea5SJeff Kirsher  * RX maximum head room required.
83874aeea5SJeff Kirsher  *
84874aeea5SJeff Kirsher  * This must be at least 1 to prevent overflow and at least 2 to allow
85874aeea5SJeff Kirsher  * pipelined receives.
86874aeea5SJeff Kirsher  */
87874aeea5SJeff Kirsher #define EFX_RXD_HEAD_ROOM 2
88874aeea5SJeff Kirsher 
89874aeea5SJeff Kirsher /* Offset of ethernet header within page */
90874aeea5SJeff Kirsher static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
91874aeea5SJeff Kirsher 					     struct efx_rx_buffer *buf)
92874aeea5SJeff Kirsher {
93b590ace0SBen Hutchings 	return buf->page_offset + efx->type->rx_buffer_hash_size;
94874aeea5SJeff Kirsher }
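/* Size of the DMA-mapped area backing a page-based RX buffer */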
95874aeea5SJeff Kirsher static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
96874aeea5SJeff Kirsher {
97874aeea5SJeff Kirsher 	return PAGE_SIZE << efx->rx_buffer_order;
98874aeea5SJeff Kirsher }
99874aeea5SJeff Kirsher 
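/* Return a pointer to the Ethernet header within the buffer, skipping any
 * hash prefix the NIC has prepended to the packet data.
 */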
100874aeea5SJeff Kirsher static u8 *efx_rx_buf_eh(struct efx_nic *efx, struct efx_rx_buffer *buf)
101874aeea5SJeff Kirsher {
102db339569SBen Hutchings 	if (buf->flags & EFX_RX_BUF_PAGE)
103874aeea5SJeff Kirsher 		return page_address(buf->u.page) + efx_rx_buf_offset(efx, buf);
104874aeea5SJeff Kirsher 	else
1050beaca2cSBen Hutchings 		return (u8 *)buf->u.skb->data + efx->type->rx_buffer_hash_size;
106874aeea5SJeff Kirsher }
107874aeea5SJeff Kirsher 
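/* Extract the RSS hash, which the NIC stores in the four bytes immediately
 * before the Ethernet header.
 */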
108874aeea5SJeff Kirsher static inline u32 efx_rx_buf_hash(const u8 *eh)
109874aeea5SJeff Kirsher {
110874aeea5SJeff Kirsher 	/* The ethernet header is always directly after any hash. */
111874aeea5SJeff Kirsher #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) || NET_IP_ALIGN % 4 == 0
112874aeea5SJeff Kirsher 	return __le32_to_cpup((const __le32 *)(eh - 4));
113874aeea5SJeff Kirsher #else
114874aeea5SJeff Kirsher 	const u8 *data = eh - 4;
1150beaca2cSBen Hutchings 	return (u32)data[0]	  |
116874aeea5SJeff Kirsher 	       (u32)data[1] << 8  |
117874aeea5SJeff Kirsher 	       (u32)data[2] << 16 |
1180beaca2cSBen Hutchings 	       (u32)data[3] << 24;
119874aeea5SJeff Kirsher #endif
120874aeea5SJeff Kirsher }
121874aeea5SJeff Kirsher 
122874aeea5SJeff Kirsher /**
123874aeea5SJeff Kirsher  * efx_init_rx_buffers_skb - create EFX_RX_BATCH skb-based RX buffers
124874aeea5SJeff Kirsher  *
125874aeea5SJeff Kirsher  * @rx_queue:		Efx RX queue
126874aeea5SJeff Kirsher  *
127874aeea5SJeff Kirsher  * This allocates EFX_RX_BATCH skbs, maps them for DMA, and populates a
128874aeea5SJeff Kirsher  * struct efx_rx_buffer for each one. Returns a negative error code or 0
129874aeea5SJeff Kirsher  * on success. It may fail after inserting fewer than EFX_RX_BATCH
130874aeea5SJeff Kirsher  * buffers.
131874aeea5SJeff Kirsher  */
132874aeea5SJeff Kirsher static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
133874aeea5SJeff Kirsher {
134874aeea5SJeff Kirsher 	struct efx_nic *efx = rx_queue->efx;
135874aeea5SJeff Kirsher 	struct net_device *net_dev = efx->net_dev;
136874aeea5SJeff Kirsher 	struct efx_rx_buffer *rx_buf;
137874aeea5SJeff Kirsher 	struct sk_buff *skb;
138874aeea5SJeff Kirsher 	int skb_len = efx->rx_buffer_len;
139874aeea5SJeff Kirsher 	unsigned index, count;
140874aeea5SJeff Kirsher 
141874aeea5SJeff Kirsher 	for (count = 0; count < EFX_RX_BATCH; ++count) {
142874aeea5SJeff Kirsher 		index = rx_queue->added_count & rx_queue->ptr_mask;
143874aeea5SJeff Kirsher 		rx_buf = efx_rx_buffer(rx_queue, index);
144874aeea5SJeff Kirsher 
145874aeea5SJeff Kirsher 		rx_buf->u.skb = skb = netdev_alloc_skb(net_dev, skb_len);
146874aeea5SJeff Kirsher 		if (unlikely(!skb))
147874aeea5SJeff Kirsher 			return -ENOMEM;
148874aeea5SJeff Kirsher 
149ff3bc1e7SBen Hutchings 		/* Adjust the SKB for padding */
150874aeea5SJeff Kirsher 		skb_reserve(skb, NET_IP_ALIGN);
151874aeea5SJeff Kirsher 		rx_buf->len = skb_len - NET_IP_ALIGN;
152db339569SBen Hutchings 		rx_buf->flags = 0;
153874aeea5SJeff Kirsher 
1540e33d870SBen Hutchings 		rx_buf->dma_addr = dma_map_single(&efx->pci_dev->dev,
155874aeea5SJeff Kirsher 						  skb->data, rx_buf->len,
1560e33d870SBen Hutchings 						  DMA_FROM_DEVICE);
1570e33d870SBen Hutchings 		if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
158874aeea5SJeff Kirsher 					       rx_buf->dma_addr))) {
159874aeea5SJeff Kirsher 			dev_kfree_skb_any(skb);
160874aeea5SJeff Kirsher 			rx_buf->u.skb = NULL;
161874aeea5SJeff Kirsher 			return -EIO;
162874aeea5SJeff Kirsher 		}
163874aeea5SJeff Kirsher 
164874aeea5SJeff Kirsher 		++rx_queue->added_count;
165874aeea5SJeff Kirsher 		++rx_queue->alloc_skb_count;
166874aeea5SJeff Kirsher 	}
167874aeea5SJeff Kirsher 
168874aeea5SJeff Kirsher 	return 0;
169874aeea5SJeff Kirsher }
170874aeea5SJeff Kirsher 
171874aeea5SJeff Kirsher /**
172874aeea5SJeff Kirsher  * efx_init_rx_buffers_page - create EFX_RX_BATCH page-based RX buffers
173874aeea5SJeff Kirsher  *
174874aeea5SJeff Kirsher  * @rx_queue:		Efx RX queue
175874aeea5SJeff Kirsher  *
176874aeea5SJeff Kirsher  * This allocates memory for EFX_RX_BATCH receive buffers, maps them for DMA,
177874aeea5SJeff Kirsher  * and populates a struct efx_rx_buffer for each one. Returns a negative error
178874aeea5SJeff Kirsher  * code or 0 on success. If a single page can be split between two buffers,
179874aeea5SJeff Kirsher  * then the page will either be inserted fully or not at all.
180874aeea5SJeff Kirsher  */
181874aeea5SJeff Kirsher static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
182874aeea5SJeff Kirsher {
183874aeea5SJeff Kirsher 	struct efx_nic *efx = rx_queue->efx;
184874aeea5SJeff Kirsher 	struct efx_rx_buffer *rx_buf;
185874aeea5SJeff Kirsher 	struct page *page;
186b590ace0SBen Hutchings 	unsigned int page_offset;
187874aeea5SJeff Kirsher 	struct efx_rx_page_state *state;
188874aeea5SJeff Kirsher 	dma_addr_t dma_addr;
189874aeea5SJeff Kirsher 	unsigned index, count;
190874aeea5SJeff Kirsher 
191874aeea5SJeff Kirsher 	/* We can split a page between two buffers */
192874aeea5SJeff Kirsher 	BUILD_BUG_ON(EFX_RX_BATCH & 1);
193874aeea5SJeff Kirsher 
194874aeea5SJeff Kirsher 	for (count = 0; count < EFX_RX_BATCH; ++count) {
195874aeea5SJeff Kirsher 		page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC,
196874aeea5SJeff Kirsher 				   efx->rx_buffer_order);
197874aeea5SJeff Kirsher 		if (unlikely(page == NULL))
198874aeea5SJeff Kirsher 			return -ENOMEM;
1990e33d870SBen Hutchings 		dma_addr = dma_map_page(&efx->pci_dev->dev, page, 0,
200874aeea5SJeff Kirsher 					efx_rx_buf_size(efx),
2010e33d870SBen Hutchings 					DMA_FROM_DEVICE);
2020e33d870SBen Hutchings 		if (unlikely(dma_mapping_error(&efx->pci_dev->dev, dma_addr))) {
203874aeea5SJeff Kirsher 			__free_pages(page, efx->rx_buffer_order);
204874aeea5SJeff Kirsher 			return -EIO;
205874aeea5SJeff Kirsher 		}
206b8e02517SBen Hutchings 		state = page_address(page);
207874aeea5SJeff Kirsher 		state->refcnt = 0;
208874aeea5SJeff Kirsher 		state->dma_addr = dma_addr;
209874aeea5SJeff Kirsher 
210874aeea5SJeff Kirsher 		dma_addr += sizeof(struct efx_rx_page_state);
211b590ace0SBen Hutchings 		page_offset = sizeof(struct efx_rx_page_state);
212874aeea5SJeff Kirsher 
213874aeea5SJeff Kirsher 	split:
214874aeea5SJeff Kirsher 		index = rx_queue->added_count & rx_queue->ptr_mask;
215874aeea5SJeff Kirsher 		rx_buf = efx_rx_buffer(rx_queue, index);
216874aeea5SJeff Kirsher 		rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
217874aeea5SJeff Kirsher 		rx_buf->u.page = page;
218c73e787aSBen Hutchings 		rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN;
219874aeea5SJeff Kirsher 		rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
220db339569SBen Hutchings 		rx_buf->flags = EFX_RX_BUF_PAGE;
221874aeea5SJeff Kirsher 		++rx_queue->added_count;
222874aeea5SJeff Kirsher 		++rx_queue->alloc_page_count;
223874aeea5SJeff Kirsher 		++state->refcnt;
224874aeea5SJeff Kirsher 
225874aeea5SJeff Kirsher 		if ((~count & 1) && (efx->rx_buffer_len <= EFX_RX_HALF_PAGE)) {
226874aeea5SJeff Kirsher 			/* Use the second half of the page */
227874aeea5SJeff Kirsher 			get_page(page);
228874aeea5SJeff Kirsher 			dma_addr += (PAGE_SIZE >> 1);
229b590ace0SBen Hutchings 			page_offset += (PAGE_SIZE >> 1);
230874aeea5SJeff Kirsher 			++count;
231874aeea5SJeff Kirsher 			goto split;
232874aeea5SJeff Kirsher 		}
233874aeea5SJeff Kirsher 	}
234874aeea5SJeff Kirsher 
235874aeea5SJeff Kirsher 	return 0;
236874aeea5SJeff Kirsher }
237874aeea5SJeff Kirsher 
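/* Unmap an RX buffer's DMA mapping.  A shared page is only unmapped once the
 * last buffer using it is released; otherwise just sync the portion that was
 * used (used_len) for the CPU.
 */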
238874aeea5SJeff Kirsher static void efx_unmap_rx_buffer(struct efx_nic *efx,
2393a68f19dSBen Hutchings 				struct efx_rx_buffer *rx_buf,
2403a68f19dSBen Hutchings 				unsigned int used_len)
241874aeea5SJeff Kirsher {
242db339569SBen Hutchings 	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
243874aeea5SJeff Kirsher 		struct efx_rx_page_state *state;
244874aeea5SJeff Kirsher 
245874aeea5SJeff Kirsher 		state = page_address(rx_buf->u.page);
246874aeea5SJeff Kirsher 		if (--state->refcnt == 0) {
2470e33d870SBen Hutchings 			dma_unmap_page(&efx->pci_dev->dev,
248874aeea5SJeff Kirsher 				       state->dma_addr,
249874aeea5SJeff Kirsher 				       efx_rx_buf_size(efx),
2500e33d870SBen Hutchings 				       DMA_FROM_DEVICE);
2513a68f19dSBen Hutchings 		} else if (used_len) {
2523a68f19dSBen Hutchings 			dma_sync_single_for_cpu(&efx->pci_dev->dev,
2533a68f19dSBen Hutchings 						rx_buf->dma_addr, used_len,
2543a68f19dSBen Hutchings 						DMA_FROM_DEVICE);
255874aeea5SJeff Kirsher 		}
256db339569SBen Hutchings 	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
2570e33d870SBen Hutchings 		dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
2580e33d870SBen Hutchings 				 rx_buf->len, DMA_FROM_DEVICE);
259874aeea5SJeff Kirsher 	}
260874aeea5SJeff Kirsher }
261874aeea5SJeff Kirsher 
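/* Free the page or skb backing an RX buffer; this does not touch the DMA
 * mapping.
 */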
262874aeea5SJeff Kirsher static void efx_free_rx_buffer(struct efx_nic *efx,
263874aeea5SJeff Kirsher 			       struct efx_rx_buffer *rx_buf)
264874aeea5SJeff Kirsher {
265db339569SBen Hutchings 	if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
266874aeea5SJeff Kirsher 		__free_pages(rx_buf->u.page, efx->rx_buffer_order);
267874aeea5SJeff Kirsher 		rx_buf->u.page = NULL;
268db339569SBen Hutchings 	} else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
269874aeea5SJeff Kirsher 		dev_kfree_skb_any(rx_buf->u.skb);
270874aeea5SJeff Kirsher 		rx_buf->u.skb = NULL;
271874aeea5SJeff Kirsher 	}
272874aeea5SJeff Kirsher }
273874aeea5SJeff Kirsher 
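/* Unmap and free an RX buffer that is still owned by the driver */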
274874aeea5SJeff Kirsher static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
275874aeea5SJeff Kirsher 			       struct efx_rx_buffer *rx_buf)
276874aeea5SJeff Kirsher {
2773a68f19dSBen Hutchings 	efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
278874aeea5SJeff Kirsher 	efx_free_rx_buffer(rx_queue->efx, rx_buf);
279874aeea5SJeff Kirsher }
280874aeea5SJeff Kirsher 
281874aeea5SJeff Kirsher /* Attempt to resurrect the other receive buffer that used to share this page,
282874aeea5SJeff Kirsher  * which had previously been passed up to the kernel and freed. */
283874aeea5SJeff Kirsher static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue,
284874aeea5SJeff Kirsher 				    struct efx_rx_buffer *rx_buf)
285874aeea5SJeff Kirsher {
286874aeea5SJeff Kirsher 	struct efx_rx_page_state *state = page_address(rx_buf->u.page);
287874aeea5SJeff Kirsher 	struct efx_rx_buffer *new_buf;
288874aeea5SJeff Kirsher 	unsigned fill_level, index;
289874aeea5SJeff Kirsher 
290874aeea5SJeff Kirsher 	/* +1 because efx_rx_packet() incremented removed_count. +1 because
291874aeea5SJeff Kirsher 	 * we'd like to insert an additional descriptor whilst leaving
292874aeea5SJeff Kirsher 	 * EFX_RXD_HEAD_ROOM for the non-recycle path */
293874aeea5SJeff Kirsher 	fill_level = (rx_queue->added_count - rx_queue->removed_count + 2);
294874aeea5SJeff Kirsher 	if (unlikely(fill_level > rx_queue->max_fill)) {
295874aeea5SJeff Kirsher 		/* We could place "state" on a list, and drain the list in
296874aeea5SJeff Kirsher 		 * efx_fast_push_rx_descriptors(). For now, this will do. */
297874aeea5SJeff Kirsher 		return;
298874aeea5SJeff Kirsher 	}
299874aeea5SJeff Kirsher 
300874aeea5SJeff Kirsher 	++state->refcnt;
301874aeea5SJeff Kirsher 	get_page(rx_buf->u.page);
302874aeea5SJeff Kirsher 
303874aeea5SJeff Kirsher 	index = rx_queue->added_count & rx_queue->ptr_mask;
304874aeea5SJeff Kirsher 	new_buf = efx_rx_buffer(rx_queue, index);
305874aeea5SJeff Kirsher 	new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1);
306874aeea5SJeff Kirsher 	new_buf->u.page = rx_buf->u.page;
307874aeea5SJeff Kirsher 	new_buf->len = rx_buf->len;
308db339569SBen Hutchings 	new_buf->flags = EFX_RX_BUF_PAGE;
309874aeea5SJeff Kirsher 	++rx_queue->added_count;
310874aeea5SJeff Kirsher }
311874aeea5SJeff Kirsher 
312874aeea5SJeff Kirsher /* Recycle the given rx buffer directly back into the rx_queue. There is
313874aeea5SJeff Kirsher  * always room to add this buffer, because we've just popped a buffer. */
314874aeea5SJeff Kirsher static void efx_recycle_rx_buffer(struct efx_channel *channel,
315874aeea5SJeff Kirsher 				  struct efx_rx_buffer *rx_buf)
316874aeea5SJeff Kirsher {
317874aeea5SJeff Kirsher 	struct efx_nic *efx = channel->efx;
318874aeea5SJeff Kirsher 	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
319874aeea5SJeff Kirsher 	struct efx_rx_buffer *new_buf;
320874aeea5SJeff Kirsher 	unsigned index;
321874aeea5SJeff Kirsher 
322db339569SBen Hutchings 	rx_buf->flags &= EFX_RX_BUF_PAGE;
323db339569SBen Hutchings 
324db339569SBen Hutchings 	if ((rx_buf->flags & EFX_RX_BUF_PAGE) &&
325db339569SBen Hutchings 	    efx->rx_buffer_len <= EFX_RX_HALF_PAGE &&
326874aeea5SJeff Kirsher 	    page_count(rx_buf->u.page) == 1)
327874aeea5SJeff Kirsher 		efx_resurrect_rx_buffer(rx_queue, rx_buf);
328874aeea5SJeff Kirsher 
329874aeea5SJeff Kirsher 	index = rx_queue->added_count & rx_queue->ptr_mask;
330874aeea5SJeff Kirsher 	new_buf = efx_rx_buffer(rx_queue, index);
331874aeea5SJeff Kirsher 
332874aeea5SJeff Kirsher 	memcpy(new_buf, rx_buf, sizeof(*new_buf));
333874aeea5SJeff Kirsher 	rx_buf->u.page = NULL;
334874aeea5SJeff Kirsher 	++rx_queue->added_count;
335874aeea5SJeff Kirsher }
336874aeea5SJeff Kirsher 
337874aeea5SJeff Kirsher /**
338874aeea5SJeff Kirsher  * efx_fast_push_rx_descriptors - push new RX descriptors quickly
339874aeea5SJeff Kirsher  * @rx_queue:		RX descriptor queue
34049ce9c2cSBen Hutchings  *
341874aeea5SJeff Kirsher  * This will aim to fill the RX descriptor queue up to
342da9ca505SDavid Riddoch  * @rx_queue->max_fill. If memory cannot be allocated atomically
343874aeea5SJeff Kirsher  * to do so, a slow fill will be scheduled.
344874aeea5SJeff Kirsher  *
345874aeea5SJeff Kirsher  * The caller must provide serialisation (none is used here). In practice,
346874aeea5SJeff Kirsher  * this means this function must run from the NAPI handler, or be called
347874aeea5SJeff Kirsher  * when NAPI is disabled.
348874aeea5SJeff Kirsher  */
349874aeea5SJeff Kirsher void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
350874aeea5SJeff Kirsher {
351874aeea5SJeff Kirsher 	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
352874aeea5SJeff Kirsher 	unsigned fill_level;
353874aeea5SJeff Kirsher 	int space, rc = 0;
354874aeea5SJeff Kirsher 
355874aeea5SJeff Kirsher 	/* Calculate current fill level, and exit if we don't need to fill */
356874aeea5SJeff Kirsher 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
357874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
358874aeea5SJeff Kirsher 	if (fill_level >= rx_queue->fast_fill_trigger)
359874aeea5SJeff Kirsher 		goto out;
360874aeea5SJeff Kirsher 
361874aeea5SJeff Kirsher 	/* Record minimum fill level */
362874aeea5SJeff Kirsher 	if (unlikely(fill_level < rx_queue->min_fill)) {
363874aeea5SJeff Kirsher 		if (fill_level)
364874aeea5SJeff Kirsher 			rx_queue->min_fill = fill_level;
365874aeea5SJeff Kirsher 	}
366874aeea5SJeff Kirsher 
367da9ca505SDavid Riddoch 	space = rx_queue->max_fill - fill_level;
36864235187SDavid Riddoch 	EFX_BUG_ON_PARANOID(space < EFX_RX_BATCH);
369874aeea5SJeff Kirsher 
370874aeea5SJeff Kirsher 	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
371874aeea5SJeff Kirsher 		   "RX queue %d fast-filling descriptor ring from"
372874aeea5SJeff Kirsher 		   " level %d to level %d using %s allocation\n",
373874aeea5SJeff Kirsher 		   efx_rx_queue_index(rx_queue), fill_level,
374da9ca505SDavid Riddoch 		   rx_queue->max_fill,
375874aeea5SJeff Kirsher 		   channel->rx_alloc_push_pages ? "page" : "skb");
376874aeea5SJeff Kirsher 
377874aeea5SJeff Kirsher 	do {
378874aeea5SJeff Kirsher 		if (channel->rx_alloc_push_pages)
379874aeea5SJeff Kirsher 			rc = efx_init_rx_buffers_page(rx_queue);
380874aeea5SJeff Kirsher 		else
381874aeea5SJeff Kirsher 			rc = efx_init_rx_buffers_skb(rx_queue);
382874aeea5SJeff Kirsher 		if (unlikely(rc)) {
383874aeea5SJeff Kirsher 			/* Ensure that we don't leave the rx queue empty */
384874aeea5SJeff Kirsher 			if (rx_queue->added_count == rx_queue->removed_count)
385874aeea5SJeff Kirsher 				efx_schedule_slow_fill(rx_queue);
386874aeea5SJeff Kirsher 			goto out;
387874aeea5SJeff Kirsher 		}
388874aeea5SJeff Kirsher 	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
389874aeea5SJeff Kirsher 
390874aeea5SJeff Kirsher 	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
391874aeea5SJeff Kirsher 		   "RX queue %d fast-filled descriptor ring "
392874aeea5SJeff Kirsher 		   "to level %d\n", efx_rx_queue_index(rx_queue),
393874aeea5SJeff Kirsher 		   rx_queue->added_count - rx_queue->removed_count);
394874aeea5SJeff Kirsher 
395874aeea5SJeff Kirsher  out:
396874aeea5SJeff Kirsher 	if (rx_queue->notified_count != rx_queue->added_count)
397874aeea5SJeff Kirsher 		efx_nic_notify_rx_desc(rx_queue);
398874aeea5SJeff Kirsher }
399874aeea5SJeff Kirsher 
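/* Timer callback scheduled when an atomic refill fails; it raises a fill
 * event so that NAPI runs again and the refill is retried.
 */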
400874aeea5SJeff Kirsher void efx_rx_slow_fill(unsigned long context)
401874aeea5SJeff Kirsher {
402874aeea5SJeff Kirsher 	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
403874aeea5SJeff Kirsher 
404874aeea5SJeff Kirsher 	/* Post an event to cause NAPI to run and refill the queue */
4052ae75dacSBen Hutchings 	efx_nic_generate_fill_event(rx_queue);
406874aeea5SJeff Kirsher 	++rx_queue->slow_fill_count;
407874aeea5SJeff Kirsher }
408874aeea5SJeff Kirsher 
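/* Validate the length reported by the RX event against the posted buffer.
 * Over-length packets are flagged for discard; if the DMA overran the buffer
 * itself, the fragment may have to be leaked and a recovery reset is
 * scheduled.
 */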
409874aeea5SJeff Kirsher static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
410874aeea5SJeff Kirsher 				     struct efx_rx_buffer *rx_buf,
411db339569SBen Hutchings 				     int len, bool *leak_packet)
412874aeea5SJeff Kirsher {
413874aeea5SJeff Kirsher 	struct efx_nic *efx = rx_queue->efx;
414874aeea5SJeff Kirsher 	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
415874aeea5SJeff Kirsher 
416874aeea5SJeff Kirsher 	if (likely(len <= max_len))
417874aeea5SJeff Kirsher 		return;
418874aeea5SJeff Kirsher 
419874aeea5SJeff Kirsher 	/* The packet must be discarded, but this is only a fatal error
420874aeea5SJeff Kirsher 	 * if the caller indicated it was
421874aeea5SJeff Kirsher 	 */
422db339569SBen Hutchings 	rx_buf->flags |= EFX_RX_PKT_DISCARD;
423874aeea5SJeff Kirsher 
424874aeea5SJeff Kirsher 	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
425874aeea5SJeff Kirsher 		if (net_ratelimit())
426874aeea5SJeff Kirsher 			netif_err(efx, rx_err, efx->net_dev,
427874aeea5SJeff Kirsher 				  " RX queue %d seriously overlength "
428874aeea5SJeff Kirsher 				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
429874aeea5SJeff Kirsher 				  efx_rx_queue_index(rx_queue), len, max_len,
430874aeea5SJeff Kirsher 				  efx->type->rx_buffer_padding);
431874aeea5SJeff Kirsher 		/* If this buffer was skb-allocated, then the meta
432874aeea5SJeff Kirsher 		 * data at the end of the skb will be trashed. So
433874aeea5SJeff Kirsher 		 * we have no choice but to leak the fragment.
434874aeea5SJeff Kirsher 		 */
435db339569SBen Hutchings 		*leak_packet = !(rx_buf->flags & EFX_RX_BUF_PAGE);
436874aeea5SJeff Kirsher 		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
437874aeea5SJeff Kirsher 	} else {
438874aeea5SJeff Kirsher 		if (net_ratelimit())
439874aeea5SJeff Kirsher 			netif_err(efx, rx_err, efx->net_dev,
440874aeea5SJeff Kirsher 				  " RX queue %d overlength RX event "
441874aeea5SJeff Kirsher 				  "(0x%x > 0x%x)\n",
442874aeea5SJeff Kirsher 				  efx_rx_queue_index(rx_queue), len, max_len);
443874aeea5SJeff Kirsher 	}
444874aeea5SJeff Kirsher 
445874aeea5SJeff Kirsher 	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
446874aeea5SJeff Kirsher }
447874aeea5SJeff Kirsher 
44861321d92SBen Hutchings /* Pass a received packet up through GRO.  GRO can handle pages
44961321d92SBen Hutchings  * regardless of checksum state and skbs with a good checksum.
450874aeea5SJeff Kirsher  */
451874aeea5SJeff Kirsher static void efx_rx_packet_gro(struct efx_channel *channel,
452874aeea5SJeff Kirsher 			      struct efx_rx_buffer *rx_buf,
453db339569SBen Hutchings 			      const u8 *eh)
454874aeea5SJeff Kirsher {
455874aeea5SJeff Kirsher 	struct napi_struct *napi = &channel->napi_str;
456874aeea5SJeff Kirsher 	gro_result_t gro_result;
457874aeea5SJeff Kirsher 
458db339569SBen Hutchings 	if (rx_buf->flags & EFX_RX_BUF_PAGE) {
459874aeea5SJeff Kirsher 		struct efx_nic *efx = channel->efx;
460874aeea5SJeff Kirsher 		struct page *page = rx_buf->u.page;
461874aeea5SJeff Kirsher 		struct sk_buff *skb;
462874aeea5SJeff Kirsher 
463874aeea5SJeff Kirsher 		rx_buf->u.page = NULL;
464874aeea5SJeff Kirsher 
465874aeea5SJeff Kirsher 		skb = napi_get_frags(napi);
466874aeea5SJeff Kirsher 		if (!skb) {
467874aeea5SJeff Kirsher 			put_page(page);
468874aeea5SJeff Kirsher 			return;
469874aeea5SJeff Kirsher 		}
470874aeea5SJeff Kirsher 
471874aeea5SJeff Kirsher 		if (efx->net_dev->features & NETIF_F_RXHASH)
472874aeea5SJeff Kirsher 			skb->rxhash = efx_rx_buf_hash(eh);
473874aeea5SJeff Kirsher 
47470350b06SBen Hutchings 		skb_fill_page_desc(skb, 0, page,
47570350b06SBen Hutchings 				   efx_rx_buf_offset(efx, rx_buf), rx_buf->len);
476874aeea5SJeff Kirsher 
477874aeea5SJeff Kirsher 		skb->len = rx_buf->len;
478874aeea5SJeff Kirsher 		skb->data_len = rx_buf->len;
479874aeea5SJeff Kirsher 		skb->truesize += rx_buf->len;
480db339569SBen Hutchings 		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
481db339569SBen Hutchings 				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
482874aeea5SJeff Kirsher 
48379d68b37SStuart Hodgson 		skb_record_rx_queue(skb, channel->rx_queue.core_index);
484874aeea5SJeff Kirsher 
485874aeea5SJeff Kirsher 		gro_result = napi_gro_frags(napi);
486874aeea5SJeff Kirsher 	} else {
487874aeea5SJeff Kirsher 		struct sk_buff *skb = rx_buf->u.skb;
488874aeea5SJeff Kirsher 
489db339569SBen Hutchings 		EFX_BUG_ON_PARANOID(!(rx_buf->flags & EFX_RX_PKT_CSUMMED));
490874aeea5SJeff Kirsher 		rx_buf->u.skb = NULL;
491ff3bc1e7SBen Hutchings 		skb->ip_summed = CHECKSUM_UNNECESSARY;
492874aeea5SJeff Kirsher 
493874aeea5SJeff Kirsher 		gro_result = napi_gro_receive(napi, skb);
494874aeea5SJeff Kirsher 	}
495874aeea5SJeff Kirsher 
496874aeea5SJeff Kirsher 	if (gro_result == GRO_NORMAL) {
497874aeea5SJeff Kirsher 		channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
498874aeea5SJeff Kirsher 	} else if (gro_result != GRO_DROP) {
499874aeea5SJeff Kirsher 		channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
500874aeea5SJeff Kirsher 		channel->irq_mod_score += 2;
501874aeea5SJeff Kirsher 	}
502874aeea5SJeff Kirsher }
503874aeea5SJeff Kirsher 
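/* Handle a received packet.  First half: does not touch the packet payload.
 * The length is validated and the buffer is held in channel->rx_pkt so that
 * the previous packet's headers have time to be prefetched before
 * __efx_rx_packet() processes it.
 */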
504874aeea5SJeff Kirsher void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
505db339569SBen Hutchings 		   unsigned int len, u16 flags)
506874aeea5SJeff Kirsher {
507874aeea5SJeff Kirsher 	struct efx_nic *efx = rx_queue->efx;
508874aeea5SJeff Kirsher 	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
509874aeea5SJeff Kirsher 	struct efx_rx_buffer *rx_buf;
510874aeea5SJeff Kirsher 	bool leak_packet = false;
511874aeea5SJeff Kirsher 
512874aeea5SJeff Kirsher 	rx_buf = efx_rx_buffer(rx_queue, index);
513db339569SBen Hutchings 	rx_buf->flags |= flags;
514874aeea5SJeff Kirsher 
515874aeea5SJeff Kirsher 	/* This allows the refill path to post another buffer.
516874aeea5SJeff Kirsher 	 * EFX_RXD_HEAD_ROOM ensures that the slot we are using
517874aeea5SJeff Kirsher 	 * isn't overwritten yet.
518874aeea5SJeff Kirsher 	 */
519874aeea5SJeff Kirsher 	rx_queue->removed_count++;
520874aeea5SJeff Kirsher 
521874aeea5SJeff Kirsher 	/* Validate the length encoded in the event vs the descriptor pushed */
522db339569SBen Hutchings 	efx_rx_packet__check_len(rx_queue, rx_buf, len, &leak_packet);
523874aeea5SJeff Kirsher 
524874aeea5SJeff Kirsher 	netif_vdbg(efx, rx_status, efx->net_dev,
525874aeea5SJeff Kirsher 		   "RX queue %d received id %x at %llx+%x %s%s\n",
526874aeea5SJeff Kirsher 		   efx_rx_queue_index(rx_queue), index,
527874aeea5SJeff Kirsher 		   (unsigned long long)rx_buf->dma_addr, len,
528db339569SBen Hutchings 		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
529db339569SBen Hutchings 		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
530874aeea5SJeff Kirsher 
531874aeea5SJeff Kirsher 	/* Discard packet, if instructed to do so */
532db339569SBen Hutchings 	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
533874aeea5SJeff Kirsher 		if (unlikely(leak_packet))
534874aeea5SJeff Kirsher 			channel->n_skbuff_leaks++;
535874aeea5SJeff Kirsher 		else
536874aeea5SJeff Kirsher 			efx_recycle_rx_buffer(channel, rx_buf);
537874aeea5SJeff Kirsher 
538874aeea5SJeff Kirsher 		/* Don't hold off the previous receive */
539874aeea5SJeff Kirsher 		rx_buf = NULL;
540874aeea5SJeff Kirsher 		goto out;
541874aeea5SJeff Kirsher 	}
542874aeea5SJeff Kirsher 
5433a68f19dSBen Hutchings 	/* Release and/or sync DMA mapping - assumes all RX buffers
5443a68f19dSBen Hutchings 	 * consumed in-order per RX queue
545874aeea5SJeff Kirsher 	 */
5463a68f19dSBen Hutchings 	efx_unmap_rx_buffer(efx, rx_buf, len);
547874aeea5SJeff Kirsher 
548874aeea5SJeff Kirsher 	/* Prefetch nice and early so data will (hopefully) be in cache by
549874aeea5SJeff Kirsher 	 * the time we look at it.
550874aeea5SJeff Kirsher 	 */
551874aeea5SJeff Kirsher 	prefetch(efx_rx_buf_eh(efx, rx_buf));
552874aeea5SJeff Kirsher 
553874aeea5SJeff Kirsher 	/* Pipeline receives so that we give time for packet headers to be
554874aeea5SJeff Kirsher 	 * prefetched into cache.
555874aeea5SJeff Kirsher 	 */
556874aeea5SJeff Kirsher 	rx_buf->len = len - efx->type->rx_buffer_hash_size;
557874aeea5SJeff Kirsher out:
558874aeea5SJeff Kirsher 	if (channel->rx_pkt)
559db339569SBen Hutchings 		__efx_rx_packet(channel, channel->rx_pkt);
560874aeea5SJeff Kirsher 	channel->rx_pkt = rx_buf;
561874aeea5SJeff Kirsher }
562874aeea5SJeff Kirsher 
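/* Deliver an skb-backed packet to the stack without GRO, using the channel
 * type's receive_skb hook if one is provided.
 */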
5631ddceb4cSBen Hutchings static void efx_rx_deliver(struct efx_channel *channel,
5641ddceb4cSBen Hutchings 			   struct efx_rx_buffer *rx_buf)
5651ddceb4cSBen Hutchings {
5661ddceb4cSBen Hutchings 	struct sk_buff *skb;
5671ddceb4cSBen Hutchings 
5681ddceb4cSBen Hutchings 	/* We now own the SKB */
5691ddceb4cSBen Hutchings 	skb = rx_buf->u.skb;
5701ddceb4cSBen Hutchings 	rx_buf->u.skb = NULL;
5711ddceb4cSBen Hutchings 
5721ddceb4cSBen Hutchings 	/* Set the SKB flags */
5731ddceb4cSBen Hutchings 	skb_checksum_none_assert(skb);
5741ddceb4cSBen Hutchings 
57579d68b37SStuart Hodgson 	/* Record the rx_queue */
57679d68b37SStuart Hodgson 	skb_record_rx_queue(skb, channel->rx_queue.core_index);
57779d68b37SStuart Hodgson 
578c31e5f9fSStuart Hodgson 	if (channel->type->receive_skb)
579*4a74dc65SBen Hutchings 		if (channel->type->receive_skb(channel, skb))
580*4a74dc65SBen Hutchings 			goto handled;
581*4a74dc65SBen Hutchings 
582*4a74dc65SBen Hutchings 	/* Pass the packet up */
5831ddceb4cSBen Hutchings 	netif_receive_skb(skb);
5841ddceb4cSBen Hutchings 
585*4a74dc65SBen Hutchings handled:
5861ddceb4cSBen Hutchings 	/* Update allocation strategy method */
5871ddceb4cSBen Hutchings 	channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
5881ddceb4cSBen Hutchings }
5891ddceb4cSBen Hutchings 
590874aeea5SJeff Kirsher /* Handle a received packet.  Second half: Touches packet payload. */
591db339569SBen Hutchings void __efx_rx_packet(struct efx_channel *channel, struct efx_rx_buffer *rx_buf)
592874aeea5SJeff Kirsher {
593874aeea5SJeff Kirsher 	struct efx_nic *efx = channel->efx;
594874aeea5SJeff Kirsher 	u8 *eh = efx_rx_buf_eh(efx, rx_buf);
595874aeea5SJeff Kirsher 
596874aeea5SJeff Kirsher 	/* If we're in loopback test, then pass the packet directly to the
597874aeea5SJeff Kirsher 	 * loopback layer, and free the rx_buf here
598874aeea5SJeff Kirsher 	 */
599874aeea5SJeff Kirsher 	if (unlikely(efx->loopback_selftest)) {
600874aeea5SJeff Kirsher 		efx_loopback_rx_packet(efx, eh, rx_buf->len);
601874aeea5SJeff Kirsher 		efx_free_rx_buffer(efx, rx_buf);
602874aeea5SJeff Kirsher 		return;
603874aeea5SJeff Kirsher 	}
604874aeea5SJeff Kirsher 
605db339569SBen Hutchings 	if (!(rx_buf->flags & EFX_RX_BUF_PAGE)) {
6061ddceb4cSBen Hutchings 		struct sk_buff *skb = rx_buf->u.skb;
607874aeea5SJeff Kirsher 
608874aeea5SJeff Kirsher 		prefetch(skb_shinfo(skb));
609874aeea5SJeff Kirsher 
610874aeea5SJeff Kirsher 		skb_reserve(skb, efx->type->rx_buffer_hash_size);
611874aeea5SJeff Kirsher 		skb_put(skb, rx_buf->len);
612874aeea5SJeff Kirsher 
613874aeea5SJeff Kirsher 		if (efx->net_dev->features & NETIF_F_RXHASH)
614874aeea5SJeff Kirsher 			skb->rxhash = efx_rx_buf_hash(eh);
615874aeea5SJeff Kirsher 
616874aeea5SJeff Kirsher 		/* Move past the ethernet header; eth_type_trans() sets
617874aeea5SJeff Kirsher 		 * skb->protocol and advances skb->data past the header */
618874aeea5SJeff Kirsher 		skb->protocol = eth_type_trans(skb, efx->net_dev);
619874aeea5SJeff Kirsher 
62079d68b37SStuart Hodgson 		skb_record_rx_queue(skb, channel->rx_queue.core_index);
621874aeea5SJeff Kirsher 	}
622874aeea5SJeff Kirsher 
623874aeea5SJeff Kirsher 	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
624db339569SBen Hutchings 		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
625874aeea5SJeff Kirsher 
626c31e5f9fSStuart Hodgson 	if (likely(rx_buf->flags & (EFX_RX_BUF_PAGE | EFX_RX_PKT_CSUMMED)) &&
627c31e5f9fSStuart Hodgson 	    !channel->type->receive_skb)
628db339569SBen Hutchings 		efx_rx_packet_gro(channel, rx_buf, eh);
6291ddceb4cSBen Hutchings 	else
6301ddceb4cSBen Hutchings 		efx_rx_deliver(channel, rx_buf);
631874aeea5SJeff Kirsher }
632874aeea5SJeff Kirsher 
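/* Decide whether this channel should refill with page-based or skb-based
 * buffers, based on the hysteresis counter updated on each completed receive.
 * Channels with a receive_skb handler always use skbs.
 */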
633874aeea5SJeff Kirsher void efx_rx_strategy(struct efx_channel *channel)
634874aeea5SJeff Kirsher {
635874aeea5SJeff Kirsher 	enum efx_rx_alloc_method method = rx_alloc_method;
636874aeea5SJeff Kirsher 
637c31e5f9fSStuart Hodgson 	if (channel->type->receive_skb) {
638c31e5f9fSStuart Hodgson 		channel->rx_alloc_push_pages = false;
639c31e5f9fSStuart Hodgson 		return;
640c31e5f9fSStuart Hodgson 	}
641c31e5f9fSStuart Hodgson 
642874aeea5SJeff Kirsher 	/* Only makes sense to use page-based allocation if GRO is enabled */
643874aeea5SJeff Kirsher 	if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
644874aeea5SJeff Kirsher 		method = RX_ALLOC_METHOD_SKB;
645874aeea5SJeff Kirsher 	} else if (method == RX_ALLOC_METHOD_AUTO) {
646874aeea5SJeff Kirsher 		/* Constrain the rx_alloc_level */
647874aeea5SJeff Kirsher 		if (channel->rx_alloc_level < 0)
648874aeea5SJeff Kirsher 			channel->rx_alloc_level = 0;
649874aeea5SJeff Kirsher 		else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
650874aeea5SJeff Kirsher 			channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
651874aeea5SJeff Kirsher 
652874aeea5SJeff Kirsher 		/* Decide on the allocation method */
653874aeea5SJeff Kirsher 		method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
654874aeea5SJeff Kirsher 			  RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
655874aeea5SJeff Kirsher 	}
656874aeea5SJeff Kirsher 
657874aeea5SJeff Kirsher 	/* Push the option */
658874aeea5SJeff Kirsher 	channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE);
659874aeea5SJeff Kirsher }
660874aeea5SJeff Kirsher 
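/* Allocate the software buffer table and hardware descriptor ring for an
 * RX queue.
 */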
661874aeea5SJeff Kirsher int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
662874aeea5SJeff Kirsher {
663874aeea5SJeff Kirsher 	struct efx_nic *efx = rx_queue->efx;
664874aeea5SJeff Kirsher 	unsigned int entries;
665874aeea5SJeff Kirsher 	int rc;
666874aeea5SJeff Kirsher 
667874aeea5SJeff Kirsher 	/* Create the smallest power-of-two aligned ring */
668874aeea5SJeff Kirsher 	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
669874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
670874aeea5SJeff Kirsher 	rx_queue->ptr_mask = entries - 1;
671874aeea5SJeff Kirsher 
672874aeea5SJeff Kirsher 	netif_dbg(efx, probe, efx->net_dev,
673874aeea5SJeff Kirsher 		  "creating RX queue %d size %#x mask %#x\n",
674874aeea5SJeff Kirsher 		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
675874aeea5SJeff Kirsher 		  rx_queue->ptr_mask);
676874aeea5SJeff Kirsher 
677874aeea5SJeff Kirsher 	/* Allocate RX buffers */
678c2e4e25aSThomas Meyer 	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
679874aeea5SJeff Kirsher 				   GFP_KERNEL);
680874aeea5SJeff Kirsher 	if (!rx_queue->buffer)
681874aeea5SJeff Kirsher 		return -ENOMEM;
682874aeea5SJeff Kirsher 
683874aeea5SJeff Kirsher 	rc = efx_nic_probe_rx(rx_queue);
684874aeea5SJeff Kirsher 	if (rc) {
685874aeea5SJeff Kirsher 		kfree(rx_queue->buffer);
686874aeea5SJeff Kirsher 		rx_queue->buffer = NULL;
687874aeea5SJeff Kirsher 	}
688874aeea5SJeff Kirsher 	return rc;
689874aeea5SJeff Kirsher }
690874aeea5SJeff Kirsher 
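/* Reset an RX queue's counters, work out its fill thresholds and initialise
 * the hardware ring.
 */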
691874aeea5SJeff Kirsher void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
692874aeea5SJeff Kirsher {
693874aeea5SJeff Kirsher 	struct efx_nic *efx = rx_queue->efx;
69464235187SDavid Riddoch 	unsigned int max_fill, trigger, max_trigger;
695874aeea5SJeff Kirsher 
696874aeea5SJeff Kirsher 	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
697874aeea5SJeff Kirsher 		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
698874aeea5SJeff Kirsher 
699874aeea5SJeff Kirsher 	/* Initialise ptr fields */
700874aeea5SJeff Kirsher 	rx_queue->added_count = 0;
701874aeea5SJeff Kirsher 	rx_queue->notified_count = 0;
702874aeea5SJeff Kirsher 	rx_queue->removed_count = 0;
703874aeea5SJeff Kirsher 	rx_queue->min_fill = -1U;
704874aeea5SJeff Kirsher 
705874aeea5SJeff Kirsher 	/* Initialise limit fields */
706874aeea5SJeff Kirsher 	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
70764235187SDavid Riddoch 	max_trigger = max_fill - EFX_RX_BATCH;
70864235187SDavid Riddoch 	if (rx_refill_threshold != 0) {
709874aeea5SJeff Kirsher 		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
71064235187SDavid Riddoch 		if (trigger > max_trigger)
71164235187SDavid Riddoch 			trigger = max_trigger;
71264235187SDavid Riddoch 	} else {
71364235187SDavid Riddoch 		trigger = max_trigger;
71464235187SDavid Riddoch 	}
715874aeea5SJeff Kirsher 
716874aeea5SJeff Kirsher 	rx_queue->max_fill = max_fill;
717874aeea5SJeff Kirsher 	rx_queue->fast_fill_trigger = trigger;
718874aeea5SJeff Kirsher 
719874aeea5SJeff Kirsher 	/* Set up RX descriptor ring */
7209f2cb71cSBen Hutchings 	rx_queue->enabled = true;
721874aeea5SJeff Kirsher 	efx_nic_init_rx(rx_queue);
722874aeea5SJeff Kirsher }
723874aeea5SJeff Kirsher 
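/* Stop refilling an RX queue, shut down the hardware ring and release any
 * buffers still owned by the driver.
 */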
724874aeea5SJeff Kirsher void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
725874aeea5SJeff Kirsher {
726874aeea5SJeff Kirsher 	int i;
727874aeea5SJeff Kirsher 	struct efx_rx_buffer *rx_buf;
728874aeea5SJeff Kirsher 
729874aeea5SJeff Kirsher 	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
730874aeea5SJeff Kirsher 		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
731874aeea5SJeff Kirsher 
7329f2cb71cSBen Hutchings 	/* A flush failure might have left rx_queue->enabled set */
7339f2cb71cSBen Hutchings 	rx_queue->enabled = false;
7349f2cb71cSBen Hutchings 
735874aeea5SJeff Kirsher 	del_timer_sync(&rx_queue->slow_fill);
736874aeea5SJeff Kirsher 	efx_nic_fini_rx(rx_queue);
737874aeea5SJeff Kirsher 
738874aeea5SJeff Kirsher 	/* Release RX buffers. NB: start at index 0, not the current HW ptr */
739874aeea5SJeff Kirsher 	if (rx_queue->buffer) {
740874aeea5SJeff Kirsher 		for (i = 0; i <= rx_queue->ptr_mask; i++) {
741874aeea5SJeff Kirsher 			rx_buf = efx_rx_buffer(rx_queue, i);
742874aeea5SJeff Kirsher 			efx_fini_rx_buffer(rx_queue, rx_buf);
743874aeea5SJeff Kirsher 		}
744874aeea5SJeff Kirsher 	}
745874aeea5SJeff Kirsher }
746874aeea5SJeff Kirsher 
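/* Free the resources allocated by efx_probe_rx_queue() */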
747874aeea5SJeff Kirsher void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
748874aeea5SJeff Kirsher {
749874aeea5SJeff Kirsher 	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
750874aeea5SJeff Kirsher 		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
751874aeea5SJeff Kirsher 
752874aeea5SJeff Kirsher 	efx_nic_remove_rx(rx_queue);
753874aeea5SJeff Kirsher 
754874aeea5SJeff Kirsher 	kfree(rx_queue->buffer);
755874aeea5SJeff Kirsher 	rx_queue->buffer = NULL;
756874aeea5SJeff Kirsher }
757874aeea5SJeff Kirsher 
758874aeea5SJeff Kirsher 
759874aeea5SJeff Kirsher module_param(rx_alloc_method, int, 0644);
760874aeea5SJeff Kirsher MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers");
761874aeea5SJeff Kirsher 
762874aeea5SJeff Kirsher module_param(rx_refill_threshold, uint, 0444);
763874aeea5SJeff Kirsher MODULE_PARM_DESC(rx_refill_threshold,
76464235187SDavid Riddoch 		 "RX descriptor ring refill threshold (%)");
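
/* Both of the above are module parameters of the sfc driver.  As an
 * illustration (values here are chosen purely as an example), page-based
 * allocation can be forced and the refill threshold raised with:
 *
 *   modprobe sfc rx_alloc_method=2 rx_refill_threshold=90
 */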
765874aeea5SJeff Kirsher 
766