xref: /linux/drivers/net/ethernet/sfc/rx.c (revision 8ccf3800dbdeaf26bcdefa471c9c8e0da7e6ec7a)
1874aeea5SJeff Kirsher /****************************************************************************
2f7a6d2c4SBen Hutchings  * Driver for Solarflare network controllers and boards
3874aeea5SJeff Kirsher  * Copyright 2005-2006 Fen Systems Ltd.
4f7a6d2c4SBen Hutchings  * Copyright 2005-2013 Solarflare Communications Inc.
5874aeea5SJeff Kirsher  *
6874aeea5SJeff Kirsher  * This program is free software; you can redistribute it and/or modify it
7874aeea5SJeff Kirsher  * under the terms of the GNU General Public License version 2 as published
8874aeea5SJeff Kirsher  * by the Free Software Foundation, incorporated herein by reference.
9874aeea5SJeff Kirsher  */
10874aeea5SJeff Kirsher 
11874aeea5SJeff Kirsher #include <linux/socket.h>
12874aeea5SJeff Kirsher #include <linux/in.h>
13874aeea5SJeff Kirsher #include <linux/slab.h>
14874aeea5SJeff Kirsher #include <linux/ip.h>
15c47b2d9dSBen Hutchings #include <linux/ipv6.h>
16874aeea5SJeff Kirsher #include <linux/tcp.h>
17874aeea5SJeff Kirsher #include <linux/udp.h>
18874aeea5SJeff Kirsher #include <linux/prefetch.h>
196eb07cafSPaul Gortmaker #include <linux/moduleparam.h>
202768935aSDaniel Pieczko #include <linux/iommu.h>
21874aeea5SJeff Kirsher #include <net/ip.h>
22874aeea5SJeff Kirsher #include <net/checksum.h>
23874aeea5SJeff Kirsher #include "net_driver.h"
24874aeea5SJeff Kirsher #include "efx.h"
25add72477SBen Hutchings #include "filter.h"
26874aeea5SJeff Kirsher #include "nic.h"
27874aeea5SJeff Kirsher #include "selftest.h"
28874aeea5SJeff Kirsher #include "workarounds.h"
29874aeea5SJeff Kirsher 
301648a23fSDaniel Pieczko /* Preferred number of descriptors to fill at once */
311648a23fSDaniel Pieczko #define EFX_RX_PREFERRED_BATCH 8U
32874aeea5SJeff Kirsher 
332768935aSDaniel Pieczko /* Number of RX buffers to recycle pages for.  When creating the RX page recycle
342768935aSDaniel Pieczko  * ring, this number is divided by the number of buffers per page to calculate
352768935aSDaniel Pieczko  * the number of pages to store in the RX page recycle ring.
362768935aSDaniel Pieczko  */
372768935aSDaniel Pieczko #define EFX_RECYCLE_RING_SIZE_IOMMU 4096
381648a23fSDaniel Pieczko #define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
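
/* Example (illustrative): with an IOMMU present and two buffers per page,
 * efx_init_rx_recycle_ring() sizes the page ring to
 * roundup_pow_of_two(4096 / 2) = 2048 pages.
 */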
39874aeea5SJeff Kirsher 
40874aeea5SJeff Kirsher /* Size of buffer allocated for skb header area. */
41d4ef5b6fSJon Cooper #define EFX_SKB_HEADERS  128u
42874aeea5SJeff Kirsher 
43874aeea5SJeff Kirsher /* This is the percentage fill level below which new RX descriptors
44874aeea5SJeff Kirsher  * will be added to the RX descriptor ring.
45874aeea5SJeff Kirsher  */
4664235187SDavid Riddoch static unsigned int rx_refill_threshold;
47874aeea5SJeff Kirsher 
4885740cdfSBen Hutchings /* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
4985740cdfSBen Hutchings #define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
5085740cdfSBen Hutchings 				      EFX_RX_USR_BUF_SIZE)
5185740cdfSBen Hutchings 
52874aeea5SJeff Kirsher /*
53874aeea5SJeff Kirsher  * RX maximum head room required.
54874aeea5SJeff Kirsher  *
5585740cdfSBen Hutchings  * This must be at least 1 to prevent overflow, plus one packet-worth
5685740cdfSBen Hutchings  * to allow pipelined receives.
57874aeea5SJeff Kirsher  */
5885740cdfSBen Hutchings #define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
59874aeea5SJeff Kirsher 
60b184f16bSBen Hutchings static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
61874aeea5SJeff Kirsher {
62b184f16bSBen Hutchings 	return page_address(buf->page) + buf->page_offset;
63874aeea5SJeff Kirsher }
64874aeea5SJeff Kirsher 
6543a3739dSJon Cooper static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
66874aeea5SJeff Kirsher {
6743a3739dSJon Cooper #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
6843a3739dSJon Cooper 	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
69874aeea5SJeff Kirsher #else
7043a3739dSJon Cooper 	const u8 *data = eh + efx->rx_packet_hash_offset;
710beaca2cSBen Hutchings 	return (u32)data[0]	  |
72874aeea5SJeff Kirsher 	       (u32)data[1] << 8  |
73874aeea5SJeff Kirsher 	       (u32)data[2] << 16 |
740beaca2cSBen Hutchings 	       (u32)data[3] << 24;
75874aeea5SJeff Kirsher #endif
76874aeea5SJeff Kirsher }
77874aeea5SJeff Kirsher 
7885740cdfSBen Hutchings static inline struct efx_rx_buffer *
7985740cdfSBen Hutchings efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
8085740cdfSBen Hutchings {
8185740cdfSBen Hutchings 	if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
8285740cdfSBen Hutchings 		return efx_rx_buffer(rx_queue, 0);
8385740cdfSBen Hutchings 	else
8485740cdfSBen Hutchings 		return rx_buf + 1;
8585740cdfSBen Hutchings }
8685740cdfSBen Hutchings 
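/* Make the buffer contents visible to the CPU after the NIC has written
 * received data into it.
 */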
872768935aSDaniel Pieczko static inline void efx_sync_rx_buffer(struct efx_nic *efx,
882768935aSDaniel Pieczko 				      struct efx_rx_buffer *rx_buf,
892768935aSDaniel Pieczko 				      unsigned int len)
902768935aSDaniel Pieczko {
912768935aSDaniel Pieczko 	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
922768935aSDaniel Pieczko 				DMA_FROM_DEVICE);
932768935aSDaniel Pieczko }
942768935aSDaniel Pieczko 
951648a23fSDaniel Pieczko void efx_rx_config_page_split(struct efx_nic *efx)
961648a23fSDaniel Pieczko {
972ec03014SAndrew Rybchenko 	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
98950c54dfSBen Hutchings 				      EFX_RX_BUF_ALIGNMENT);
991648a23fSDaniel Pieczko 	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
1001648a23fSDaniel Pieczko 		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
1011648a23fSDaniel Pieczko 		 efx->rx_page_buf_step);
1021648a23fSDaniel Pieczko 	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
1031648a23fSDaniel Pieczko 		efx->rx_bufs_per_page;
1041648a23fSDaniel Pieczko 	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
1051648a23fSDaniel Pieczko 					       efx->rx_bufs_per_page);
1061648a23fSDaniel Pieczko }
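
/* Illustrative example, assuming 4K pages, order-0 allocation and a buffer
 * step that fits twice into PAGE_SIZE - sizeof(struct efx_rx_page_state):
 * rx_bufs_per_page = 2, rx_buffer_truesize = PAGE_SIZE / 2 = 2048 and
 * rx_pages_per_batch = DIV_ROUND_UP(8, 2) = 4.
 */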
1071648a23fSDaniel Pieczko 
1082768935aSDaniel Pieczko /* Check the RX page recycle ring for a page that can be reused. */
1092768935aSDaniel Pieczko static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
1102768935aSDaniel Pieczko {
1112768935aSDaniel Pieczko 	struct efx_nic *efx = rx_queue->efx;
1122768935aSDaniel Pieczko 	struct page *page;
1132768935aSDaniel Pieczko 	struct efx_rx_page_state *state;
1142768935aSDaniel Pieczko 	unsigned index;
1152768935aSDaniel Pieczko 
1162768935aSDaniel Pieczko 	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
1172768935aSDaniel Pieczko 	page = rx_queue->page_ring[index];
1182768935aSDaniel Pieczko 	if (page == NULL)
1192768935aSDaniel Pieczko 		return NULL;
1202768935aSDaniel Pieczko 
1212768935aSDaniel Pieczko 	rx_queue->page_ring[index] = NULL;
1222768935aSDaniel Pieczko 	/* page_remove cannot exceed page_add. */
1232768935aSDaniel Pieczko 	if (rx_queue->page_remove != rx_queue->page_add)
1242768935aSDaniel Pieczko 		++rx_queue->page_remove;
1252768935aSDaniel Pieczko 
1262768935aSDaniel Pieczko 	/* If page_count is 1 then we hold the only reference to this page. */
1272768935aSDaniel Pieczko 	if (page_count(page) == 1) {
1282768935aSDaniel Pieczko 		++rx_queue->page_recycle_count;
1292768935aSDaniel Pieczko 		return page;
1302768935aSDaniel Pieczko 	} else {
1312768935aSDaniel Pieczko 		state = page_address(page);
1322768935aSDaniel Pieczko 		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
1332768935aSDaniel Pieczko 			       PAGE_SIZE << efx->rx_buffer_order,
1342768935aSDaniel Pieczko 			       DMA_FROM_DEVICE);
1352768935aSDaniel Pieczko 		put_page(page);
1362768935aSDaniel Pieczko 		++rx_queue->page_recycle_failed;
1372768935aSDaniel Pieczko 	}
1382768935aSDaniel Pieczko 
1392768935aSDaniel Pieczko 	return NULL;
1402768935aSDaniel Pieczko }
1412768935aSDaniel Pieczko 
142874aeea5SJeff Kirsher /**
14397d48a10SAlexandre Rames  * efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
144874aeea5SJeff Kirsher  *
145874aeea5SJeff Kirsher  * @rx_queue:		Efx RX queue
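 * @atomic:		control memory allocation flags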
146874aeea5SJeff Kirsher  *
1471648a23fSDaniel Pieczko  * This allocates a batch of pages, maps them for DMA, and populates
1481648a23fSDaniel Pieczko  * a struct efx_rx_buffer for each one. Returns a negative error code or
1491648a23fSDaniel Pieczko  * 0 on success. If a single page can be used for multiple buffers,
1501648a23fSDaniel Pieczko  * then the page will either be inserted fully, or not at all.
151874aeea5SJeff Kirsher  */
152cce28794SJon Cooper static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
153874aeea5SJeff Kirsher {
154874aeea5SJeff Kirsher 	struct efx_nic *efx = rx_queue->efx;
155874aeea5SJeff Kirsher 	struct efx_rx_buffer *rx_buf;
156874aeea5SJeff Kirsher 	struct page *page;
157b590ace0SBen Hutchings 	unsigned int page_offset;
158874aeea5SJeff Kirsher 	struct efx_rx_page_state *state;
159874aeea5SJeff Kirsher 	dma_addr_t dma_addr;
160874aeea5SJeff Kirsher 	unsigned index, count;
161874aeea5SJeff Kirsher 
1621648a23fSDaniel Pieczko 	count = 0;
1631648a23fSDaniel Pieczko 	do {
1642768935aSDaniel Pieczko 		page = efx_reuse_page(rx_queue);
1652768935aSDaniel Pieczko 		if (page == NULL) {
166cce28794SJon Cooper 			page = alloc_pages(__GFP_COLD | __GFP_COMP |
167cce28794SJon Cooper 					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
168874aeea5SJeff Kirsher 					   efx->rx_buffer_order);
169874aeea5SJeff Kirsher 			if (unlikely(page == NULL))
170874aeea5SJeff Kirsher 				return -ENOMEM;
1712768935aSDaniel Pieczko 			dma_addr =
1722768935aSDaniel Pieczko 				dma_map_page(&efx->pci_dev->dev, page, 0,
173272baeebSBen Hutchings 					     PAGE_SIZE << efx->rx_buffer_order,
1740e33d870SBen Hutchings 					     DMA_FROM_DEVICE);
1752768935aSDaniel Pieczko 			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
1762768935aSDaniel Pieczko 						       dma_addr))) {
177874aeea5SJeff Kirsher 				__free_pages(page, efx->rx_buffer_order);
178874aeea5SJeff Kirsher 				return -EIO;
179874aeea5SJeff Kirsher 			}
180b8e02517SBen Hutchings 			state = page_address(page);
181874aeea5SJeff Kirsher 			state->dma_addr = dma_addr;
1822768935aSDaniel Pieczko 		} else {
1832768935aSDaniel Pieczko 			state = page_address(page);
1842768935aSDaniel Pieczko 			dma_addr = state->dma_addr;
1852768935aSDaniel Pieczko 		}
186874aeea5SJeff Kirsher 
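		/* Buffers are laid out after the struct efx_rx_page_state
		 * kept at the start of each page.
		 */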
187874aeea5SJeff Kirsher 		dma_addr += sizeof(struct efx_rx_page_state);
188b590ace0SBen Hutchings 		page_offset = sizeof(struct efx_rx_page_state);
189874aeea5SJeff Kirsher 
1901648a23fSDaniel Pieczko 		do {
191874aeea5SJeff Kirsher 			index = rx_queue->added_count & rx_queue->ptr_mask;
192874aeea5SJeff Kirsher 			rx_buf = efx_rx_buffer(rx_queue, index);
1932ec03014SAndrew Rybchenko 			rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
19497d48a10SAlexandre Rames 			rx_buf->page = page;
1952ec03014SAndrew Rybchenko 			rx_buf->page_offset = page_offset + efx->rx_ip_align;
196272baeebSBen Hutchings 			rx_buf->len = efx->rx_dma_len;
197179ea7f0SBen Hutchings 			rx_buf->flags = 0;
1981648a23fSDaniel Pieczko 			++rx_queue->added_count;
1991648a23fSDaniel Pieczko 			get_page(page);
2001648a23fSDaniel Pieczko 			dma_addr += efx->rx_page_buf_step;
2011648a23fSDaniel Pieczko 			page_offset += efx->rx_page_buf_step;
2021648a23fSDaniel Pieczko 		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
203179ea7f0SBen Hutchings 
204179ea7f0SBen Hutchings 		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
2051648a23fSDaniel Pieczko 	} while (++count < efx->rx_pages_per_batch);
206874aeea5SJeff Kirsher 
207874aeea5SJeff Kirsher 	return 0;
208874aeea5SJeff Kirsher }
209874aeea5SJeff Kirsher 
2102768935aSDaniel Pieczko /* Unmap a DMA-mapped page.  This function is only called for the final RX
2112768935aSDaniel Pieczko  * buffer in a page.
2122768935aSDaniel Pieczko  */
213874aeea5SJeff Kirsher static void efx_unmap_rx_buffer(struct efx_nic *efx,
2142768935aSDaniel Pieczko 				struct efx_rx_buffer *rx_buf)
215874aeea5SJeff Kirsher {
2162768935aSDaniel Pieczko 	struct page *page = rx_buf->page;
217874aeea5SJeff Kirsher 
2182768935aSDaniel Pieczko 	if (page) {
2192768935aSDaniel Pieczko 		struct efx_rx_page_state *state = page_address(page);
2200e33d870SBen Hutchings 		dma_unmap_page(&efx->pci_dev->dev,
221874aeea5SJeff Kirsher 			       state->dma_addr,
222272baeebSBen Hutchings 			       PAGE_SIZE << efx->rx_buffer_order,
2230e33d870SBen Hutchings 			       DMA_FROM_DEVICE);
224874aeea5SJeff Kirsher 	}
225874aeea5SJeff Kirsher }
226874aeea5SJeff Kirsher 
2272768935aSDaniel Pieczko static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
228874aeea5SJeff Kirsher {
22997d48a10SAlexandre Rames 	if (rx_buf->page) {
2302768935aSDaniel Pieczko 		put_page(rx_buf->page);
23197d48a10SAlexandre Rames 		rx_buf->page = NULL;
232874aeea5SJeff Kirsher 	}
233874aeea5SJeff Kirsher }
234874aeea5SJeff Kirsher 
2352768935aSDaniel Pieczko /* Attempt to recycle the page if there is an RX recycle ring; the page can
2362768935aSDaniel Pieczko  * only be added if this is the final RX buffer, to prevent pages being used in
2372768935aSDaniel Pieczko  * the descriptor ring and appearing in the recycle ring simultaneously.
2382768935aSDaniel Pieczko  */
2392768935aSDaniel Pieczko static void efx_recycle_rx_page(struct efx_channel *channel,
2402768935aSDaniel Pieczko 				struct efx_rx_buffer *rx_buf)
2412768935aSDaniel Pieczko {
2422768935aSDaniel Pieczko 	struct page *page = rx_buf->page;
2432768935aSDaniel Pieczko 	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
2442768935aSDaniel Pieczko 	struct efx_nic *efx = rx_queue->efx;
2452768935aSDaniel Pieczko 	unsigned index;
2462768935aSDaniel Pieczko 
2472768935aSDaniel Pieczko 	/* Only recycle the page after processing the final buffer. */
248179ea7f0SBen Hutchings 	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
2492768935aSDaniel Pieczko 		return;
2502768935aSDaniel Pieczko 
2512768935aSDaniel Pieczko 	index = rx_queue->page_add & rx_queue->page_ptr_mask;
2522768935aSDaniel Pieczko 	if (rx_queue->page_ring[index] == NULL) {
2532768935aSDaniel Pieczko 		unsigned read_index = rx_queue->page_remove &
2542768935aSDaniel Pieczko 			rx_queue->page_ptr_mask;
2552768935aSDaniel Pieczko 
2562768935aSDaniel Pieczko 		/* The next slot in the recycle ring is available, but
2572768935aSDaniel Pieczko 		 * increment page_remove if the read pointer currently
2582768935aSDaniel Pieczko 		 * points here.
2592768935aSDaniel Pieczko 		 */
2602768935aSDaniel Pieczko 		if (read_index == index)
2612768935aSDaniel Pieczko 			++rx_queue->page_remove;
2622768935aSDaniel Pieczko 		rx_queue->page_ring[index] = page;
2632768935aSDaniel Pieczko 		++rx_queue->page_add;
2642768935aSDaniel Pieczko 		return;
2652768935aSDaniel Pieczko 	}
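	/* The recycle ring is full: unmap the page and drop our reference. */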
2662768935aSDaniel Pieczko 	++rx_queue->page_recycle_full;
2672768935aSDaniel Pieczko 	efx_unmap_rx_buffer(efx, rx_buf);
2682768935aSDaniel Pieczko 	put_page(rx_buf->page);
2692768935aSDaniel Pieczko }
2702768935aSDaniel Pieczko 
271874aeea5SJeff Kirsher static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
272874aeea5SJeff Kirsher 			       struct efx_rx_buffer *rx_buf)
273874aeea5SJeff Kirsher {
2742768935aSDaniel Pieczko 	/* Release the page reference we hold for the buffer. */
2752768935aSDaniel Pieczko 	if (rx_buf->page)
2762768935aSDaniel Pieczko 		put_page(rx_buf->page);
2772768935aSDaniel Pieczko 
2782768935aSDaniel Pieczko 	/* If this is the last buffer in a page, unmap and free it. */
279179ea7f0SBen Hutchings 	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
2802768935aSDaniel Pieczko 		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
2812768935aSDaniel Pieczko 		efx_free_rx_buffer(rx_buf);
2822768935aSDaniel Pieczko 	}
2832768935aSDaniel Pieczko 	rx_buf->page = NULL;
284874aeea5SJeff Kirsher }
285874aeea5SJeff Kirsher 
2862768935aSDaniel Pieczko /* Recycle the pages that are used by buffers that have just been received. */
287734d4e15SBen Hutchings static void efx_recycle_rx_pages(struct efx_channel *channel,
28885740cdfSBen Hutchings 				 struct efx_rx_buffer *rx_buf,
28985740cdfSBen Hutchings 				 unsigned int n_frags)
290874aeea5SJeff Kirsher {
291874aeea5SJeff Kirsher 	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
292874aeea5SJeff Kirsher 
29385740cdfSBen Hutchings 	do {
2942768935aSDaniel Pieczko 		efx_recycle_rx_page(channel, rx_buf);
29585740cdfSBen Hutchings 		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
29685740cdfSBen Hutchings 	} while (--n_frags);
297874aeea5SJeff Kirsher }
298874aeea5SJeff Kirsher 
299734d4e15SBen Hutchings static void efx_discard_rx_packet(struct efx_channel *channel,
300734d4e15SBen Hutchings 				  struct efx_rx_buffer *rx_buf,
301734d4e15SBen Hutchings 				  unsigned int n_frags)
302734d4e15SBen Hutchings {
303734d4e15SBen Hutchings 	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
304734d4e15SBen Hutchings 
305734d4e15SBen Hutchings 	efx_recycle_rx_pages(channel, rx_buf, n_frags);
306734d4e15SBen Hutchings 
307734d4e15SBen Hutchings 	do {
308734d4e15SBen Hutchings 		efx_free_rx_buffer(rx_buf);
309734d4e15SBen Hutchings 		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
310734d4e15SBen Hutchings 	} while (--n_frags);
311734d4e15SBen Hutchings }
312734d4e15SBen Hutchings 
313874aeea5SJeff Kirsher /**
314874aeea5SJeff Kirsher  * efx_fast_push_rx_descriptors - push new RX descriptors quickly
315874aeea5SJeff Kirsher  * @rx_queue:		RX descriptor queue
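 * @atomic:		control memory allocation flags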
31649ce9c2cSBen Hutchings  *
317874aeea5SJeff Kirsher  * This will aim to fill the RX descriptor queue up to
318da9ca505SDavid Riddoch  * @rx_queue->max_fill. If there is insufficient atomic
319874aeea5SJeff Kirsher  * memory to do so, a slow fill will be scheduled.
320874aeea5SJeff Kirsher  *
321874aeea5SJeff Kirsher  * The caller must provide serialisation (none is used here). In practice,
322874aeea5SJeff Kirsher  * this means this function must run from the NAPI handler, or be called
323874aeea5SJeff Kirsher  * when NAPI is disabled.
324874aeea5SJeff Kirsher  */
325cce28794SJon Cooper void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
326874aeea5SJeff Kirsher {
3271648a23fSDaniel Pieczko 	struct efx_nic *efx = rx_queue->efx;
3281648a23fSDaniel Pieczko 	unsigned int fill_level, batch_size;
329874aeea5SJeff Kirsher 	int space, rc = 0;
330874aeea5SJeff Kirsher 
331d8aec745SBen Hutchings 	if (!rx_queue->refill_enabled)
332d8aec745SBen Hutchings 		return;
333d8aec745SBen Hutchings 
334874aeea5SJeff Kirsher 	/* Calculate current fill level, and exit if we don't need to fill */
335874aeea5SJeff Kirsher 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
336874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
337874aeea5SJeff Kirsher 	if (fill_level >= rx_queue->fast_fill_trigger)
338874aeea5SJeff Kirsher 		goto out;
339874aeea5SJeff Kirsher 
340874aeea5SJeff Kirsher 	/* Record minimum fill level */
341874aeea5SJeff Kirsher 	if (unlikely(fill_level < rx_queue->min_fill)) {
342874aeea5SJeff Kirsher 		if (fill_level)
343874aeea5SJeff Kirsher 			rx_queue->min_fill = fill_level;
344874aeea5SJeff Kirsher 	}
345874aeea5SJeff Kirsher 
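	/* Refill in whole batches of pages, e.g. 4 pages x 2 buffers = 8
	 * descriptors per efx_init_rx_buffers() call with a typical page
	 * split.
	 */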
3461648a23fSDaniel Pieczko 	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
347da9ca505SDavid Riddoch 	space = rx_queue->max_fill - fill_level;
3481648a23fSDaniel Pieczko 	EFX_BUG_ON_PARANOID(space < batch_size);
349874aeea5SJeff Kirsher 
350874aeea5SJeff Kirsher 	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
351874aeea5SJeff Kirsher 		   "RX queue %d fast-filling descriptor ring from"
35297d48a10SAlexandre Rames 		   " level %d to level %d\n",
353874aeea5SJeff Kirsher 		   efx_rx_queue_index(rx_queue), fill_level,
35497d48a10SAlexandre Rames 		   rx_queue->max_fill);
35597d48a10SAlexandre Rames 
356874aeea5SJeff Kirsher 
357874aeea5SJeff Kirsher 	do {
358cce28794SJon Cooper 		rc = efx_init_rx_buffers(rx_queue, atomic);
359874aeea5SJeff Kirsher 		if (unlikely(rc)) {
360874aeea5SJeff Kirsher 			/* Ensure that we don't leave the rx queue empty */
361874aeea5SJeff Kirsher 			if (rx_queue->added_count == rx_queue->removed_count)
362874aeea5SJeff Kirsher 				efx_schedule_slow_fill(rx_queue);
363874aeea5SJeff Kirsher 			goto out;
364874aeea5SJeff Kirsher 		}
3651648a23fSDaniel Pieczko 	} while ((space -= batch_size) >= batch_size);
366874aeea5SJeff Kirsher 
367874aeea5SJeff Kirsher 	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
368874aeea5SJeff Kirsher 		   "RX queue %d fast-filled descriptor ring "
369874aeea5SJeff Kirsher 		   "to level %d\n", efx_rx_queue_index(rx_queue),
370874aeea5SJeff Kirsher 		   rx_queue->added_count - rx_queue->removed_count);
371874aeea5SJeff Kirsher 
372874aeea5SJeff Kirsher  out:
373874aeea5SJeff Kirsher 	if (rx_queue->notified_count != rx_queue->added_count)
374874aeea5SJeff Kirsher 		efx_nic_notify_rx_desc(rx_queue);
375874aeea5SJeff Kirsher }
376874aeea5SJeff Kirsher 
377874aeea5SJeff Kirsher void efx_rx_slow_fill(unsigned long context)
378874aeea5SJeff Kirsher {
379874aeea5SJeff Kirsher 	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
380874aeea5SJeff Kirsher 
381874aeea5SJeff Kirsher 	/* Post an event to cause NAPI to run and refill the queue */
3822ae75dacSBen Hutchings 	efx_nic_generate_fill_event(rx_queue);
383874aeea5SJeff Kirsher 	++rx_queue->slow_fill_count;
384874aeea5SJeff Kirsher }
385874aeea5SJeff Kirsher 
386874aeea5SJeff Kirsher static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
387874aeea5SJeff Kirsher 				     struct efx_rx_buffer *rx_buf,
38897d48a10SAlexandre Rames 				     int len)
389874aeea5SJeff Kirsher {
390874aeea5SJeff Kirsher 	struct efx_nic *efx = rx_queue->efx;
391874aeea5SJeff Kirsher 	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
392874aeea5SJeff Kirsher 
393874aeea5SJeff Kirsher 	if (likely(len <= max_len))
394874aeea5SJeff Kirsher 		return;
395874aeea5SJeff Kirsher 
396874aeea5SJeff Kirsher 	/* The packet must be discarded, but this is only a fatal error
397874aeea5SJeff Kirsher 	 * if the caller indicated it was
398874aeea5SJeff Kirsher 	 */
399db339569SBen Hutchings 	rx_buf->flags |= EFX_RX_PKT_DISCARD;
400874aeea5SJeff Kirsher 
401874aeea5SJeff Kirsher 	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
402874aeea5SJeff Kirsher 		if (net_ratelimit())
403874aeea5SJeff Kirsher 			netif_err(efx, rx_err, efx->net_dev,
404874aeea5SJeff Kirsher 				  " RX queue %d seriously overlength "
405874aeea5SJeff Kirsher 				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
406874aeea5SJeff Kirsher 				  efx_rx_queue_index(rx_queue), len, max_len,
407874aeea5SJeff Kirsher 				  efx->type->rx_buffer_padding);
408874aeea5SJeff Kirsher 		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
409874aeea5SJeff Kirsher 	} else {
410874aeea5SJeff Kirsher 		if (net_ratelimit())
411874aeea5SJeff Kirsher 			netif_err(efx, rx_err, efx->net_dev,
412874aeea5SJeff Kirsher 				  " RX queue %d overlength RX event "
413874aeea5SJeff Kirsher 				  "(0x%x > 0x%x)\n",
414874aeea5SJeff Kirsher 				  efx_rx_queue_index(rx_queue), len, max_len);
415874aeea5SJeff Kirsher 	}
416874aeea5SJeff Kirsher 
417874aeea5SJeff Kirsher 	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
418874aeea5SJeff Kirsher }
419874aeea5SJeff Kirsher 
42061321d92SBen Hutchings /* Pass a received packet up through GRO.  GRO can handle pages
42161321d92SBen Hutchings  * regardless of checksum state and skbs with a good checksum.
422874aeea5SJeff Kirsher  */
42385740cdfSBen Hutchings static void
42485740cdfSBen Hutchings efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
42585740cdfSBen Hutchings 		  unsigned int n_frags, u8 *eh)
426874aeea5SJeff Kirsher {
427874aeea5SJeff Kirsher 	struct napi_struct *napi = &channel->napi_str;
428874aeea5SJeff Kirsher 	gro_result_t gro_result;
429874aeea5SJeff Kirsher 	struct efx_nic *efx = channel->efx;
430874aeea5SJeff Kirsher 	struct sk_buff *skb;
431874aeea5SJeff Kirsher 
432874aeea5SJeff Kirsher 	skb = napi_get_frags(napi);
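	/* If no skb is available, drop the whole packet by releasing the
	 * page reference held by each fragment.
	 */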
43385740cdfSBen Hutchings 	if (unlikely(!skb)) {
43485740cdfSBen Hutchings 		while (n_frags--) {
43585740cdfSBen Hutchings 			put_page(rx_buf->page);
43685740cdfSBen Hutchings 			rx_buf->page = NULL;
43785740cdfSBen Hutchings 			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
43885740cdfSBen Hutchings 		}
439874aeea5SJeff Kirsher 		return;
440874aeea5SJeff Kirsher 	}
441874aeea5SJeff Kirsher 
442874aeea5SJeff Kirsher 	if (efx->net_dev->features & NETIF_F_RXHASH)
443c7cb38afSTom Herbert 		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
444c7cb38afSTom Herbert 			     PKT_HASH_TYPE_L3);
445db339569SBen Hutchings 	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
446db339569SBen Hutchings 			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
447874aeea5SJeff Kirsher 
44885740cdfSBen Hutchings 	for (;;) {
44985740cdfSBen Hutchings 		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
45085740cdfSBen Hutchings 				   rx_buf->page, rx_buf->page_offset,
45185740cdfSBen Hutchings 				   rx_buf->len);
45285740cdfSBen Hutchings 		rx_buf->page = NULL;
45385740cdfSBen Hutchings 		skb->len += rx_buf->len;
45485740cdfSBen Hutchings 		if (skb_shinfo(skb)->nr_frags == n_frags)
45585740cdfSBen Hutchings 			break;
45685740cdfSBen Hutchings 
45785740cdfSBen Hutchings 		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
45885740cdfSBen Hutchings 	}
45985740cdfSBen Hutchings 
46085740cdfSBen Hutchings 	skb->data_len = skb->len;
46185740cdfSBen Hutchings 	skb->truesize += n_frags * efx->rx_buffer_truesize;
46285740cdfSBen Hutchings 
46379d68b37SStuart Hodgson 	skb_record_rx_queue(skb, channel->rx_queue.core_index);
464874aeea5SJeff Kirsher 
465874aeea5SJeff Kirsher 	gro_result = napi_gro_frags(napi);
46697d48a10SAlexandre Rames 	if (gro_result != GRO_DROP)
467874aeea5SJeff Kirsher 		channel->irq_mod_score += 2;
468874aeea5SJeff Kirsher }
46997d48a10SAlexandre Rames 
47085740cdfSBen Hutchings /* Allocate and construct an SKB around page fragments */
47197d48a10SAlexandre Rames static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
47297d48a10SAlexandre Rames 				     struct efx_rx_buffer *rx_buf,
47385740cdfSBen Hutchings 				     unsigned int n_frags,
47497d48a10SAlexandre Rames 				     u8 *eh, int hdr_len)
47597d48a10SAlexandre Rames {
47697d48a10SAlexandre Rames 	struct efx_nic *efx = channel->efx;
47797d48a10SAlexandre Rames 	struct sk_buff *skb;
47897d48a10SAlexandre Rames 
47997d48a10SAlexandre Rames 	/* Allocate an SKB to store the headers */
4802ccd0b19SBen Hutchings 	skb = netdev_alloc_skb(efx->net_dev,
4812ccd0b19SBen Hutchings 			       efx->rx_ip_align + efx->rx_prefix_size +
4822ccd0b19SBen Hutchings 			       hdr_len);
483e4d112e4SEdward Cree 	if (unlikely(skb == NULL)) {
484e4d112e4SEdward Cree 		atomic_inc(&efx->n_rx_noskb_drops);
48597d48a10SAlexandre Rames 		return NULL;
486e4d112e4SEdward Cree 	}
48797d48a10SAlexandre Rames 
48897d48a10SAlexandre Rames 	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
48997d48a10SAlexandre Rames 
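	/* Copy the hardware RX prefix and packet headers into the linear
	 * area, then reserve past the prefix so skb->data points at the
	 * Ethernet header.
	 */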
4902ccd0b19SBen Hutchings 	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
4912ccd0b19SBen Hutchings 	       efx->rx_prefix_size + hdr_len);
4922ccd0b19SBen Hutchings 	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
4932ccd0b19SBen Hutchings 	__skb_put(skb, hdr_len);
49497d48a10SAlexandre Rames 
49585740cdfSBen Hutchings 	/* Append the remaining page(s) onto the frag list */
49697d48a10SAlexandre Rames 	if (rx_buf->len > hdr_len) {
49785740cdfSBen Hutchings 		rx_buf->page_offset += hdr_len;
49885740cdfSBen Hutchings 		rx_buf->len -= hdr_len;
49985740cdfSBen Hutchings 
50085740cdfSBen Hutchings 		for (;;) {
50185740cdfSBen Hutchings 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
50285740cdfSBen Hutchings 					   rx_buf->page, rx_buf->page_offset,
50385740cdfSBen Hutchings 					   rx_buf->len);
50485740cdfSBen Hutchings 			rx_buf->page = NULL;
50585740cdfSBen Hutchings 			skb->len += rx_buf->len;
50685740cdfSBen Hutchings 			skb->data_len += rx_buf->len;
50785740cdfSBen Hutchings 			if (skb_shinfo(skb)->nr_frags == n_frags)
50885740cdfSBen Hutchings 				break;
50985740cdfSBen Hutchings 
51085740cdfSBen Hutchings 			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
51185740cdfSBen Hutchings 		}
51297d48a10SAlexandre Rames 	} else {
51397d48a10SAlexandre Rames 		__free_pages(rx_buf->page, efx->rx_buffer_order);
51485740cdfSBen Hutchings 		rx_buf->page = NULL;
51585740cdfSBen Hutchings 		n_frags = 0;
51697d48a10SAlexandre Rames 	}
51797d48a10SAlexandre Rames 
51885740cdfSBen Hutchings 	skb->truesize += n_frags * efx->rx_buffer_truesize;
51997d48a10SAlexandre Rames 
52097d48a10SAlexandre Rames 	/* Move past the ethernet header */
52197d48a10SAlexandre Rames 	skb->protocol = eth_type_trans(skb, efx->net_dev);
52297d48a10SAlexandre Rames 
52397d48a10SAlexandre Rames 	return skb;
524874aeea5SJeff Kirsher }
525874aeea5SJeff Kirsher 
526874aeea5SJeff Kirsher void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
52785740cdfSBen Hutchings 		   unsigned int n_frags, unsigned int len, u16 flags)
528874aeea5SJeff Kirsher {
529874aeea5SJeff Kirsher 	struct efx_nic *efx = rx_queue->efx;
530874aeea5SJeff Kirsher 	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
531874aeea5SJeff Kirsher 	struct efx_rx_buffer *rx_buf;
532874aeea5SJeff Kirsher 
533*8ccf3800SAndrew Rybchenko 	rx_queue->rx_packets++;
534*8ccf3800SAndrew Rybchenko 
535874aeea5SJeff Kirsher 	rx_buf = efx_rx_buffer(rx_queue, index);
536179ea7f0SBen Hutchings 	rx_buf->flags |= flags;
537874aeea5SJeff Kirsher 
53885740cdfSBen Hutchings 	/* Validate the number of fragments and completed length */
53985740cdfSBen Hutchings 	if (n_frags == 1) {
5403dced740SBen Hutchings 		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
54197d48a10SAlexandre Rames 			efx_rx_packet__check_len(rx_queue, rx_buf, len);
54285740cdfSBen Hutchings 	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
543e8c68c0aSJon Cooper 		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
544e8c68c0aSJon Cooper 		   unlikely(len > n_frags * efx->rx_dma_len) ||
54585740cdfSBen Hutchings 		   unlikely(!efx->rx_scatter)) {
54685740cdfSBen Hutchings 		/* If this isn't an explicit discard request, either
54785740cdfSBen Hutchings 		 * the hardware or the driver is broken.
54885740cdfSBen Hutchings 		 */
54985740cdfSBen Hutchings 		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
55085740cdfSBen Hutchings 		rx_buf->flags |= EFX_RX_PKT_DISCARD;
55185740cdfSBen Hutchings 	}
552874aeea5SJeff Kirsher 
553874aeea5SJeff Kirsher 	netif_vdbg(efx, rx_status, efx->net_dev,
55485740cdfSBen Hutchings 		   "RX queue %d received ids %x-%x len %d %s%s\n",
555874aeea5SJeff Kirsher 		   efx_rx_queue_index(rx_queue), index,
55685740cdfSBen Hutchings 		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
557db339569SBen Hutchings 		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
558db339569SBen Hutchings 		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
559874aeea5SJeff Kirsher 
56085740cdfSBen Hutchings 	/* Discard packet, if instructed to do so.  Process the
56185740cdfSBen Hutchings 	 * previous receive first.
56285740cdfSBen Hutchings 	 */
563db339569SBen Hutchings 	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
56485740cdfSBen Hutchings 		efx_rx_flush_packet(channel);
565734d4e15SBen Hutchings 		efx_discard_rx_packet(channel, rx_buf, n_frags);
56685740cdfSBen Hutchings 		return;
567874aeea5SJeff Kirsher 	}
568874aeea5SJeff Kirsher 
5693dced740SBen Hutchings 	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
57085740cdfSBen Hutchings 		rx_buf->len = len;
57185740cdfSBen Hutchings 
5722768935aSDaniel Pieczko 	/* Release and/or sync the DMA mapping - assumes all RX buffers
5732768935aSDaniel Pieczko 	 * consumed in-order per RX queue.
574874aeea5SJeff Kirsher 	 */
5752768935aSDaniel Pieczko 	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
576874aeea5SJeff Kirsher 
577874aeea5SJeff Kirsher 	/* Prefetch nice and early so data will (hopefully) be in cache by
578874aeea5SJeff Kirsher 	 * the time we look at it.
579874aeea5SJeff Kirsher 	 */
5805036b7c7SBen Hutchings 	prefetch(efx_rx_buf_va(rx_buf));
581874aeea5SJeff Kirsher 
58243a3739dSJon Cooper 	rx_buf->page_offset += efx->rx_prefix_size;
58343a3739dSJon Cooper 	rx_buf->len -= efx->rx_prefix_size;
58485740cdfSBen Hutchings 
58585740cdfSBen Hutchings 	if (n_frags > 1) {
58685740cdfSBen Hutchings 		/* Release/sync DMA mapping for additional fragments.
58785740cdfSBen Hutchings 		 * Fix length for last fragment.
58885740cdfSBen Hutchings 		 */
58985740cdfSBen Hutchings 		unsigned int tail_frags = n_frags - 1;
59085740cdfSBen Hutchings 
59185740cdfSBen Hutchings 		for (;;) {
59285740cdfSBen Hutchings 			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
59385740cdfSBen Hutchings 			if (--tail_frags == 0)
59485740cdfSBen Hutchings 				break;
595e8c68c0aSJon Cooper 			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
59685740cdfSBen Hutchings 		}
597e8c68c0aSJon Cooper 		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
5982768935aSDaniel Pieczko 		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
59985740cdfSBen Hutchings 	}
600b74e3e8cSBen Hutchings 
601734d4e15SBen Hutchings 	/* All fragments have been DMA-synced, so recycle pages. */
6022768935aSDaniel Pieczko 	rx_buf = efx_rx_buffer(rx_queue, index);
603734d4e15SBen Hutchings 	efx_recycle_rx_pages(channel, rx_buf, n_frags);
6042768935aSDaniel Pieczko 
605874aeea5SJeff Kirsher 	/* Pipeline receives so that we give time for packet headers to be
606874aeea5SJeff Kirsher 	 * prefetched into cache.
607874aeea5SJeff Kirsher 	 */
608ff734ef4SBen Hutchings 	efx_rx_flush_packet(channel);
60985740cdfSBen Hutchings 	channel->rx_pkt_n_frags = n_frags;
61085740cdfSBen Hutchings 	channel->rx_pkt_index = index;
611874aeea5SJeff Kirsher }
612874aeea5SJeff Kirsher 
61397d48a10SAlexandre Rames static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
61485740cdfSBen Hutchings 			   struct efx_rx_buffer *rx_buf,
61585740cdfSBen Hutchings 			   unsigned int n_frags)
6161ddceb4cSBen Hutchings {
6171ddceb4cSBen Hutchings 	struct sk_buff *skb;
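	/* At most EFX_SKB_HEADERS (128) bytes of headers are copied into
	 * the linear area; any remainder stays in page fragments.
	 */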
61897d48a10SAlexandre Rames 	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);
6191ddceb4cSBen Hutchings 
62085740cdfSBen Hutchings 	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
62197d48a10SAlexandre Rames 	if (unlikely(skb == NULL)) {
6222768935aSDaniel Pieczko 		efx_free_rx_buffer(rx_buf);
62397d48a10SAlexandre Rames 		return;
62497d48a10SAlexandre Rames 	}
62597d48a10SAlexandre Rames 	skb_record_rx_queue(skb, channel->rx_queue.core_index);
6261ddceb4cSBen Hutchings 
6271ddceb4cSBen Hutchings 	/* Set the SKB flags */
6281ddceb4cSBen Hutchings 	skb_checksum_none_assert(skb);
629c99dffc4SJon Cooper 	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
630c99dffc4SJon Cooper 		skb->ip_summed = CHECKSUM_UNNECESSARY;
6311ddceb4cSBen Hutchings 
632bd9a265dSJon Cooper 	efx_rx_skb_attach_timestamp(channel, skb);
633bd9a265dSJon Cooper 
634c31e5f9fSStuart Hodgson 	if (channel->type->receive_skb)
6354a74dc65SBen Hutchings 		if (channel->type->receive_skb(channel, skb))
63697d48a10SAlexandre Rames 			return;
6374a74dc65SBen Hutchings 
6384a74dc65SBen Hutchings 	/* Pass the packet up */
6391ddceb4cSBen Hutchings 	netif_receive_skb(skb);
6401ddceb4cSBen Hutchings }
6411ddceb4cSBen Hutchings 
642874aeea5SJeff Kirsher /* Handle a received packet.  Second half: Touches packet payload. */
64385740cdfSBen Hutchings void __efx_rx_packet(struct efx_channel *channel)
644874aeea5SJeff Kirsher {
645874aeea5SJeff Kirsher 	struct efx_nic *efx = channel->efx;
64685740cdfSBen Hutchings 	struct efx_rx_buffer *rx_buf =
64785740cdfSBen Hutchings 		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
648b74e3e8cSBen Hutchings 	u8 *eh = efx_rx_buf_va(rx_buf);
649874aeea5SJeff Kirsher 
6503dced740SBen Hutchings 	/* Read length from the prefix if necessary.  This already
6513dced740SBen Hutchings 	 * excludes the length of the prefix itself.
6523dced740SBen Hutchings 	 */
6533dced740SBen Hutchings 	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
6543dced740SBen Hutchings 		rx_buf->len = le16_to_cpup((__le16 *)
6553dced740SBen Hutchings 					   (eh + efx->rx_packet_len_offset));
6563dced740SBen Hutchings 
657874aeea5SJeff Kirsher 	/* If we're in loopback test, then pass the packet directly to the
658874aeea5SJeff Kirsher 	 * loopback layer, and free the rx_buf here
659874aeea5SJeff Kirsher 	 */
660874aeea5SJeff Kirsher 	if (unlikely(efx->loopback_selftest)) {
661874aeea5SJeff Kirsher 		efx_loopback_rx_packet(efx, eh, rx_buf->len);
6622768935aSDaniel Pieczko 		efx_free_rx_buffer(rx_buf);
66385740cdfSBen Hutchings 		goto out;
664874aeea5SJeff Kirsher 	}
665874aeea5SJeff Kirsher 
666874aeea5SJeff Kirsher 	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
667db339569SBen Hutchings 		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
668874aeea5SJeff Kirsher 
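	/* TCP packets go through GRO unless the channel delivers packets
	 * through its own receive_skb handler.
	 */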
669e79255deSBen Hutchings 	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
67085740cdfSBen Hutchings 		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
6711ddceb4cSBen Hutchings 	else
67285740cdfSBen Hutchings 		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
67385740cdfSBen Hutchings out:
67485740cdfSBen Hutchings 	channel->rx_pkt_n_frags = 0;
675874aeea5SJeff Kirsher }
676874aeea5SJeff Kirsher 
677874aeea5SJeff Kirsher int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
678874aeea5SJeff Kirsher {
679874aeea5SJeff Kirsher 	struct efx_nic *efx = rx_queue->efx;
680874aeea5SJeff Kirsher 	unsigned int entries;
681874aeea5SJeff Kirsher 	int rc;
682874aeea5SJeff Kirsher 
683874aeea5SJeff Kirsher 	/* Create the smallest power-of-two aligned ring */
684874aeea5SJeff Kirsher 	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
685874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
686874aeea5SJeff Kirsher 	rx_queue->ptr_mask = entries - 1;
687874aeea5SJeff Kirsher 
688874aeea5SJeff Kirsher 	netif_dbg(efx, probe, efx->net_dev,
689874aeea5SJeff Kirsher 		  "creating RX queue %d size %#x mask %#x\n",
690874aeea5SJeff Kirsher 		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
691874aeea5SJeff Kirsher 		  rx_queue->ptr_mask);
692874aeea5SJeff Kirsher 
693874aeea5SJeff Kirsher 	/* Allocate RX buffers */
694c2e4e25aSThomas Meyer 	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
695874aeea5SJeff Kirsher 				   GFP_KERNEL);
696874aeea5SJeff Kirsher 	if (!rx_queue->buffer)
697874aeea5SJeff Kirsher 		return -ENOMEM;
698874aeea5SJeff Kirsher 
699874aeea5SJeff Kirsher 	rc = efx_nic_probe_rx(rx_queue);
700874aeea5SJeff Kirsher 	if (rc) {
701874aeea5SJeff Kirsher 		kfree(rx_queue->buffer);
702874aeea5SJeff Kirsher 		rx_queue->buffer = NULL;
703874aeea5SJeff Kirsher 	}
7042768935aSDaniel Pieczko 
705874aeea5SJeff Kirsher 	return rc;
706874aeea5SJeff Kirsher }
707874aeea5SJeff Kirsher 
708debd0034Sstephen hemminger static void efx_init_rx_recycle_ring(struct efx_nic *efx,
7092768935aSDaniel Pieczko 				     struct efx_rx_queue *rx_queue)
7102768935aSDaniel Pieczko {
7112768935aSDaniel Pieczko 	unsigned int bufs_in_recycle_ring, page_ring_size;
7122768935aSDaniel Pieczko 
7132768935aSDaniel Pieczko 	/* Set the RX recycle ring size */
7142768935aSDaniel Pieczko #ifdef CONFIG_PPC64
7152768935aSDaniel Pieczko 	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
7162768935aSDaniel Pieczko #else
717636d73daSBen Hutchings 	if (iommu_present(&pci_bus_type))
7182768935aSDaniel Pieczko 		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
7192768935aSDaniel Pieczko 	else
7202768935aSDaniel Pieczko 		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
7212768935aSDaniel Pieczko #endif /* CONFIG_PPC64 */
7222768935aSDaniel Pieczko 
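	/* Convert the buffer count into a power-of-two number of pages so
	 * that the ring can be indexed with page_ptr_mask.
	 */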
7232768935aSDaniel Pieczko 	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
7242768935aSDaniel Pieczko 					    efx->rx_bufs_per_page);
7252768935aSDaniel Pieczko 	rx_queue->page_ring = kcalloc(page_ring_size,
7262768935aSDaniel Pieczko 				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
7272768935aSDaniel Pieczko 	rx_queue->page_ptr_mask = page_ring_size - 1;
7282768935aSDaniel Pieczko }
7292768935aSDaniel Pieczko 
730874aeea5SJeff Kirsher void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
731874aeea5SJeff Kirsher {
732874aeea5SJeff Kirsher 	struct efx_nic *efx = rx_queue->efx;
73364235187SDavid Riddoch 	unsigned int max_fill, trigger, max_trigger;
734874aeea5SJeff Kirsher 
735874aeea5SJeff Kirsher 	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
736874aeea5SJeff Kirsher 		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
737874aeea5SJeff Kirsher 
738874aeea5SJeff Kirsher 	/* Initialise ptr fields */
739874aeea5SJeff Kirsher 	rx_queue->added_count = 0;
740874aeea5SJeff Kirsher 	rx_queue->notified_count = 0;
741874aeea5SJeff Kirsher 	rx_queue->removed_count = 0;
742874aeea5SJeff Kirsher 	rx_queue->min_fill = -1U;
7432768935aSDaniel Pieczko 	efx_init_rx_recycle_ring(efx, rx_queue);
7442768935aSDaniel Pieczko 
7452768935aSDaniel Pieczko 	rx_queue->page_remove = 0;
7462768935aSDaniel Pieczko 	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
7472768935aSDaniel Pieczko 	rx_queue->page_recycle_count = 0;
7482768935aSDaniel Pieczko 	rx_queue->page_recycle_failed = 0;
7492768935aSDaniel Pieczko 	rx_queue->page_recycle_full = 0;
750874aeea5SJeff Kirsher 
751874aeea5SJeff Kirsher 	/* Initialise limit fields */
752874aeea5SJeff Kirsher 	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
7531648a23fSDaniel Pieczko 	max_trigger =
7541648a23fSDaniel Pieczko 		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
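	/* The default trigger level leaves room for at least one full refill
	 * batch; a non-zero rx_refill_threshold (%) can only lower it.
	 */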
75564235187SDavid Riddoch 	if (rx_refill_threshold != 0) {
756874aeea5SJeff Kirsher 		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
75764235187SDavid Riddoch 		if (trigger > max_trigger)
75864235187SDavid Riddoch 			trigger = max_trigger;
75964235187SDavid Riddoch 	} else {
76064235187SDavid Riddoch 		trigger = max_trigger;
76164235187SDavid Riddoch 	}
762874aeea5SJeff Kirsher 
763874aeea5SJeff Kirsher 	rx_queue->max_fill = max_fill;
764874aeea5SJeff Kirsher 	rx_queue->fast_fill_trigger = trigger;
765d8aec745SBen Hutchings 	rx_queue->refill_enabled = true;
766874aeea5SJeff Kirsher 
767874aeea5SJeff Kirsher 	/* Set up RX descriptor ring */
768874aeea5SJeff Kirsher 	efx_nic_init_rx(rx_queue);
769874aeea5SJeff Kirsher }
770874aeea5SJeff Kirsher 
771874aeea5SJeff Kirsher void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
772874aeea5SJeff Kirsher {
773874aeea5SJeff Kirsher 	int i;
7742768935aSDaniel Pieczko 	struct efx_nic *efx = rx_queue->efx;
775874aeea5SJeff Kirsher 	struct efx_rx_buffer *rx_buf;
776874aeea5SJeff Kirsher 
777874aeea5SJeff Kirsher 	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
778874aeea5SJeff Kirsher 		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
779874aeea5SJeff Kirsher 
780874aeea5SJeff Kirsher 	del_timer_sync(&rx_queue->slow_fill);
781874aeea5SJeff Kirsher 
7822768935aSDaniel Pieczko 	/* Release RX buffers from the current read ptr to the write ptr */
783874aeea5SJeff Kirsher 	if (rx_queue->buffer) {
7842768935aSDaniel Pieczko 		for (i = rx_queue->removed_count; i < rx_queue->added_count;
7852768935aSDaniel Pieczko 		     i++) {
7862768935aSDaniel Pieczko 			unsigned index = i & rx_queue->ptr_mask;
7872768935aSDaniel Pieczko 			rx_buf = efx_rx_buffer(rx_queue, index);
788874aeea5SJeff Kirsher 			efx_fini_rx_buffer(rx_queue, rx_buf);
789874aeea5SJeff Kirsher 		}
790874aeea5SJeff Kirsher 	}
7912768935aSDaniel Pieczko 
7922768935aSDaniel Pieczko 	/* Unmap and release the pages in the recycle ring. Remove the ring. */
7932768935aSDaniel Pieczko 	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
7942768935aSDaniel Pieczko 		struct page *page = rx_queue->page_ring[i];
7952768935aSDaniel Pieczko 		struct efx_rx_page_state *state;
7962768935aSDaniel Pieczko 
7972768935aSDaniel Pieczko 		if (page == NULL)
7982768935aSDaniel Pieczko 			continue;
7992768935aSDaniel Pieczko 
8002768935aSDaniel Pieczko 		state = page_address(page);
8012768935aSDaniel Pieczko 		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
8022768935aSDaniel Pieczko 			       PAGE_SIZE << efx->rx_buffer_order,
8032768935aSDaniel Pieczko 			       DMA_FROM_DEVICE);
8042768935aSDaniel Pieczko 		put_page(page);
8052768935aSDaniel Pieczko 	}
8062768935aSDaniel Pieczko 	kfree(rx_queue->page_ring);
8072768935aSDaniel Pieczko 	rx_queue->page_ring = NULL;
808874aeea5SJeff Kirsher }
809874aeea5SJeff Kirsher 
810874aeea5SJeff Kirsher void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
811874aeea5SJeff Kirsher {
812874aeea5SJeff Kirsher 	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
813874aeea5SJeff Kirsher 		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
814874aeea5SJeff Kirsher 
815874aeea5SJeff Kirsher 	efx_nic_remove_rx(rx_queue);
816874aeea5SJeff Kirsher 
817874aeea5SJeff Kirsher 	kfree(rx_queue->buffer);
818874aeea5SJeff Kirsher 	rx_queue->buffer = NULL;
819874aeea5SJeff Kirsher }
820874aeea5SJeff Kirsher 
821874aeea5SJeff Kirsher 
822874aeea5SJeff Kirsher module_param(rx_refill_threshold, uint, 0444);
823874aeea5SJeff Kirsher MODULE_PARM_DESC(rx_refill_threshold,
82464235187SDavid Riddoch 		 "RX descriptor ring refill threshold (%)");
825874aeea5SJeff Kirsher 
826add72477SBen Hutchings #ifdef CONFIG_RFS_ACCEL
827add72477SBen Hutchings 
828add72477SBen Hutchings int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
829add72477SBen Hutchings 		   u16 rxq_index, u32 flow_id)
830add72477SBen Hutchings {
831add72477SBen Hutchings 	struct efx_nic *efx = netdev_priv(net_dev);
832add72477SBen Hutchings 	struct efx_channel *channel;
833add72477SBen Hutchings 	struct efx_filter_spec spec;
834add72477SBen Hutchings 	const __be16 *ports;
835c47b2d9dSBen Hutchings 	__be16 ether_type;
836add72477SBen Hutchings 	int nhoff;
837add72477SBen Hutchings 	int rc;
838add72477SBen Hutchings 
839c47b2d9dSBen Hutchings 	/* The core RPS/RFS code has already parsed and validated
840c47b2d9dSBen Hutchings 	 * VLAN, IP and transport headers.  We assume they are in the
841c47b2d9dSBen Hutchings 	 * header area.
842c47b2d9dSBen Hutchings 	 */
843add72477SBen Hutchings 
844add72477SBen Hutchings 	if (skb->protocol == htons(ETH_P_8021Q)) {
845c47b2d9dSBen Hutchings 		const struct vlan_hdr *vh =
846c47b2d9dSBen Hutchings 			(const struct vlan_hdr *)skb->data;
847add72477SBen Hutchings 
848c47b2d9dSBen Hutchings 		/* We can't filter on the IP 5-tuple and the vlan
849c47b2d9dSBen Hutchings 		 * together, so just strip the vlan header and filter
850c47b2d9dSBen Hutchings 		 * on the IP part.
851add72477SBen Hutchings 		 */
852c47b2d9dSBen Hutchings 		EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
853c47b2d9dSBen Hutchings 		ether_type = vh->h_vlan_encapsulated_proto;
854c47b2d9dSBen Hutchings 		nhoff = sizeof(struct vlan_hdr);
855c47b2d9dSBen Hutchings 	} else {
856c47b2d9dSBen Hutchings 		ether_type = skb->protocol;
857c47b2d9dSBen Hutchings 		nhoff = 0;
858add72477SBen Hutchings 	}
859add72477SBen Hutchings 
860c47b2d9dSBen Hutchings 	if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
861add72477SBen Hutchings 		return -EPROTONOSUPPORT;
862add72477SBen Hutchings 
863add72477SBen Hutchings 	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
864add72477SBen Hutchings 			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
865add72477SBen Hutchings 			   rxq_index);
866c47b2d9dSBen Hutchings 	spec.match_flags =
867c47b2d9dSBen Hutchings 		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
868c47b2d9dSBen Hutchings 		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
869c47b2d9dSBen Hutchings 		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
870c47b2d9dSBen Hutchings 	spec.ether_type = ether_type;
871c47b2d9dSBen Hutchings 
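	/* Build the 5-tuple match from the IP header; the L4 ports sit
	 * immediately after the IPv4 header (including options) or the
	 * fixed IPv6 header.
	 */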
872c47b2d9dSBen Hutchings 	if (ether_type == htons(ETH_P_IP)) {
873c47b2d9dSBen Hutchings 		const struct iphdr *ip =
874c47b2d9dSBen Hutchings 			(const struct iphdr *)(skb->data + nhoff);
875c47b2d9dSBen Hutchings 
876c47b2d9dSBen Hutchings 		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
877c47b2d9dSBen Hutchings 		if (ip_is_fragment(ip))
878c47b2d9dSBen Hutchings 			return -EPROTONOSUPPORT;
879c47b2d9dSBen Hutchings 		spec.ip_proto = ip->protocol;
880c47b2d9dSBen Hutchings 		spec.rem_host[0] = ip->saddr;
881c47b2d9dSBen Hutchings 		spec.loc_host[0] = ip->daddr;
882c47b2d9dSBen Hutchings 		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
883c47b2d9dSBen Hutchings 		ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
884c47b2d9dSBen Hutchings 	} else {
885c47b2d9dSBen Hutchings 		const struct ipv6hdr *ip6 =
886c47b2d9dSBen Hutchings 			(const struct ipv6hdr *)(skb->data + nhoff);
887c47b2d9dSBen Hutchings 
888c47b2d9dSBen Hutchings 		EFX_BUG_ON_PARANOID(skb_headlen(skb) <
889c47b2d9dSBen Hutchings 				    nhoff + sizeof(*ip6) + 4);
890c47b2d9dSBen Hutchings 		spec.ip_proto = ip6->nexthdr;
891c47b2d9dSBen Hutchings 		memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
892c47b2d9dSBen Hutchings 		memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
893c47b2d9dSBen Hutchings 		ports = (const __be16 *)(ip6 + 1);
894c47b2d9dSBen Hutchings 	}
895c47b2d9dSBen Hutchings 
896c47b2d9dSBen Hutchings 	spec.rem_port = ports[0];
897c47b2d9dSBen Hutchings 	spec.loc_port = ports[1];
898add72477SBen Hutchings 
899add72477SBen Hutchings 	rc = efx->type->filter_rfs_insert(efx, &spec);
900add72477SBen Hutchings 	if (rc < 0)
901add72477SBen Hutchings 		return rc;
902add72477SBen Hutchings 
903add72477SBen Hutchings 	/* Remember this so we can check whether to expire the filter later */
904add72477SBen Hutchings 	efx->rps_flow_id[rc] = flow_id;
905add72477SBen Hutchings 	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
906add72477SBen Hutchings 	++channel->rfs_filters_added;
907add72477SBen Hutchings 
908c47b2d9dSBen Hutchings 	if (ether_type == htons(ETH_P_IP))
909add72477SBen Hutchings 		netif_info(efx, rx_status, efx->net_dev,
910add72477SBen Hutchings 			   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
911c47b2d9dSBen Hutchings 			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
912c47b2d9dSBen Hutchings 			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
913c47b2d9dSBen Hutchings 			   ntohs(ports[1]), rxq_index, flow_id, rc);
914c47b2d9dSBen Hutchings 	else
915c47b2d9dSBen Hutchings 		netif_info(efx, rx_status, efx->net_dev,
916c47b2d9dSBen Hutchings 			   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
917c47b2d9dSBen Hutchings 			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
918c47b2d9dSBen Hutchings 			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
919c47b2d9dSBen Hutchings 			   ntohs(ports[1]), rxq_index, flow_id, rc);
920add72477SBen Hutchings 
921add72477SBen Hutchings 	return rc;
922add72477SBen Hutchings }
923add72477SBen Hutchings 
924add72477SBen Hutchings bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
925add72477SBen Hutchings {
926add72477SBen Hutchings 	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
927add72477SBen Hutchings 	unsigned int index, size;
928add72477SBen Hutchings 	u32 flow_id;
929add72477SBen Hutchings 
930add72477SBen Hutchings 	if (!spin_trylock_bh(&efx->filter_lock))
931add72477SBen Hutchings 		return false;
932add72477SBen Hutchings 
933add72477SBen Hutchings 	expire_one = efx->type->filter_rfs_expire_one;
934add72477SBen Hutchings 	index = efx->rps_expire_index;
935add72477SBen Hutchings 	size = efx->type->max_rx_ip_filters;
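	/* Examine up to 'quota' filter table entries per call, resuming
	 * from rps_expire_index on the next call.
	 */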
936add72477SBen Hutchings 	while (quota--) {
937add72477SBen Hutchings 		flow_id = efx->rps_flow_id[index];
938add72477SBen Hutchings 		if (expire_one(efx, flow_id, index))
939add72477SBen Hutchings 			netif_info(efx, rx_status, efx->net_dev,
940add72477SBen Hutchings 				   "expired filter %d [flow %u]\n",
941add72477SBen Hutchings 				   index, flow_id);
942add72477SBen Hutchings 		if (++index == size)
943add72477SBen Hutchings 			index = 0;
944add72477SBen Hutchings 	}
945add72477SBen Hutchings 	efx->rps_expire_index = index;
946add72477SBen Hutchings 
947add72477SBen Hutchings 	spin_unlock_bh(&efx->filter_lock);
948add72477SBen Hutchings 	return true;
949add72477SBen Hutchings }
950add72477SBen Hutchings 
951add72477SBen Hutchings #endif /* CONFIG_RFS_ACCEL */
952b883d0bdSBen Hutchings 
953b883d0bdSBen Hutchings /**
954b883d0bdSBen Hutchings  * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
955b883d0bdSBen Hutchings  * @spec: Specification to test
956b883d0bdSBen Hutchings  *
957b883d0bdSBen Hutchings  * Return: %true if the specification is a non-drop RX filter that
958b883d0bdSBen Hutchings  * matches a local MAC address I/G bit value of 1 or matches a local
959b883d0bdSBen Hutchings  * IPv4 or IPv6 address value in the respective multicast address
960b883d0bdSBen Hutchings  * range.  Otherwise %false.
961b883d0bdSBen Hutchings  */
962b883d0bdSBen Hutchings bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
963b883d0bdSBen Hutchings {
964b883d0bdSBen Hutchings 	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
965b883d0bdSBen Hutchings 	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
966b883d0bdSBen Hutchings 		return false;
967b883d0bdSBen Hutchings 
968b883d0bdSBen Hutchings 	if (spec->match_flags &
969b883d0bdSBen Hutchings 	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
970b883d0bdSBen Hutchings 	    is_multicast_ether_addr(spec->loc_mac))
971b883d0bdSBen Hutchings 		return true;
972b883d0bdSBen Hutchings 
973b883d0bdSBen Hutchings 	if ((spec->match_flags &
974b883d0bdSBen Hutchings 	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
975b883d0bdSBen Hutchings 	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
976b883d0bdSBen Hutchings 		if (spec->ether_type == htons(ETH_P_IP) &&
977b883d0bdSBen Hutchings 		    ipv4_is_multicast(spec->loc_host[0]))
978b883d0bdSBen Hutchings 			return true;
979b883d0bdSBen Hutchings 		if (spec->ether_type == htons(ETH_P_IPV6) &&
980b883d0bdSBen Hutchings 		    ((const u8 *)spec->loc_host)[0] == 0xff)
981b883d0bdSBen Hutchings 			return true;
982b883d0bdSBen Hutchings 	}
983b883d0bdSBen Hutchings 
984b883d0bdSBen Hutchings 	return false;
985b883d0bdSBen Hutchings }
986