xref: /linux/drivers/net/ethernet/sfc/rx.c (revision 36763266bbe8a2e93a7639b99bac2fee2c42bc5b)
1874aeea5SJeff Kirsher /****************************************************************************
2f7a6d2c4SBen Hutchings  * Driver for Solarflare network controllers and boards
3874aeea5SJeff Kirsher  * Copyright 2005-2006 Fen Systems Ltd.
4f7a6d2c4SBen Hutchings  * Copyright 2005-2013 Solarflare Communications Inc.
5874aeea5SJeff Kirsher  *
6874aeea5SJeff Kirsher  * This program is free software; you can redistribute it and/or modify it
7874aeea5SJeff Kirsher  * under the terms of the GNU General Public License version 2 as published
8874aeea5SJeff Kirsher  * by the Free Software Foundation, incorporated herein by reference.
9874aeea5SJeff Kirsher  */
10874aeea5SJeff Kirsher 
11874aeea5SJeff Kirsher #include <linux/socket.h>
12874aeea5SJeff Kirsher #include <linux/in.h>
13874aeea5SJeff Kirsher #include <linux/slab.h>
14874aeea5SJeff Kirsher #include <linux/ip.h>
15c47b2d9dSBen Hutchings #include <linux/ipv6.h>
16874aeea5SJeff Kirsher #include <linux/tcp.h>
17874aeea5SJeff Kirsher #include <linux/udp.h>
18874aeea5SJeff Kirsher #include <linux/prefetch.h>
196eb07cafSPaul Gortmaker #include <linux/moduleparam.h>
202768935aSDaniel Pieczko #include <linux/iommu.h>
21874aeea5SJeff Kirsher #include <net/ip.h>
22874aeea5SJeff Kirsher #include <net/checksum.h>
23874aeea5SJeff Kirsher #include "net_driver.h"
24874aeea5SJeff Kirsher #include "efx.h"
25add72477SBen Hutchings #include "filter.h"
26874aeea5SJeff Kirsher #include "nic.h"
27874aeea5SJeff Kirsher #include "selftest.h"
28874aeea5SJeff Kirsher #include "workarounds.h"
29874aeea5SJeff Kirsher 
301648a23fSDaniel Pieczko /* Preferred number of descriptors to fill at once */
311648a23fSDaniel Pieczko #define EFX_RX_PREFERRED_BATCH 8U
32874aeea5SJeff Kirsher 
332768935aSDaniel Pieczko /* Number of RX buffers for which pages are recycled.  When creating the RX
342768935aSDaniel Pieczko  * page recycle ring, this number is divided by the number of buffers per page
352768935aSDaniel Pieczko  * to calculate the number of pages to store in the ring.
362768935aSDaniel Pieczko  */
372768935aSDaniel Pieczko #define EFX_RECYCLE_RING_SIZE_IOMMU 4096
381648a23fSDaniel Pieczko #define EFX_RECYCLE_RING_SIZE_NOIOMMU (2 * EFX_RX_PREFERRED_BATCH)
39874aeea5SJeff Kirsher 
40874aeea5SJeff Kirsher /* Size of buffer allocated for skb header area. */
41d4ef5b6fSJon Cooper #define EFX_SKB_HEADERS  128u
42874aeea5SJeff Kirsher 
43874aeea5SJeff Kirsher /* This is the percentage fill level below which new RX descriptors
44874aeea5SJeff Kirsher  * will be added to the RX descriptor ring.
45874aeea5SJeff Kirsher  */
4664235187SDavid Riddoch static unsigned int rx_refill_threshold;
47874aeea5SJeff Kirsher 
4885740cdfSBen Hutchings /* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
4985740cdfSBen Hutchings #define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
5085740cdfSBen Hutchings 				      EFX_RX_USR_BUF_SIZE)
5185740cdfSBen Hutchings 
52874aeea5SJeff Kirsher /*
53874aeea5SJeff Kirsher  * RX maximum head room required.
54874aeea5SJeff Kirsher  *
5585740cdfSBen Hutchings  * This must be at least 1 to prevent overflow, plus one packet-worth
5685740cdfSBen Hutchings  * to allow pipelined receives.
57874aeea5SJeff Kirsher  */
5885740cdfSBen Hutchings #define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
59874aeea5SJeff Kirsher 
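/* Return the address of an RX buffer's data within its page */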
60b184f16bSBen Hutchings static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
61874aeea5SJeff Kirsher {
62b184f16bSBen Hutchings 	return page_address(buf->page) + buf->page_offset;
63874aeea5SJeff Kirsher }
64874aeea5SJeff Kirsher 
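/* Extract the 32-bit flow hash that the NIC supplies in the RX packet prefix */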
6543a3739dSJon Cooper static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
66874aeea5SJeff Kirsher {
6743a3739dSJon Cooper #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
6843a3739dSJon Cooper 	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
69874aeea5SJeff Kirsher #else
7043a3739dSJon Cooper 	const u8 *data = eh + efx->rx_packet_hash_offset;
710beaca2cSBen Hutchings 	return (u32)data[0]	  |
72874aeea5SJeff Kirsher 	       (u32)data[1] << 8  |
73874aeea5SJeff Kirsher 	       (u32)data[2] << 16 |
740beaca2cSBen Hutchings 	       (u32)data[3] << 24;
75874aeea5SJeff Kirsher #endif
76874aeea5SJeff Kirsher }
77874aeea5SJeff Kirsher 
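/* Return the next RX buffer in the descriptor ring, wrapping at the end */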
7885740cdfSBen Hutchings static inline struct efx_rx_buffer *
7985740cdfSBen Hutchings efx_rx_buf_next(struct efx_rx_queue *rx_queue, struct efx_rx_buffer *rx_buf)
8085740cdfSBen Hutchings {
8185740cdfSBen Hutchings 	if (unlikely(rx_buf == efx_rx_buffer(rx_queue, rx_queue->ptr_mask)))
8285740cdfSBen Hutchings 		return efx_rx_buffer(rx_queue, 0);
8385740cdfSBen Hutchings 	else
8485740cdfSBen Hutchings 		return rx_buf + 1;
8585740cdfSBen Hutchings }
8685740cdfSBen Hutchings 
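/* Sync the first @len bytes of an RX buffer's DMA mapping for CPU access */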
872768935aSDaniel Pieczko static inline void efx_sync_rx_buffer(struct efx_nic *efx,
882768935aSDaniel Pieczko 				      struct efx_rx_buffer *rx_buf,
892768935aSDaniel Pieczko 				      unsigned int len)
902768935aSDaniel Pieczko {
912768935aSDaniel Pieczko 	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
922768935aSDaniel Pieczko 				DMA_FROM_DEVICE);
932768935aSDaniel Pieczko }
942768935aSDaniel Pieczko 
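/* Work out how each receive page is split into buffers: a struct
 * efx_rx_page_state at the start of the page, followed by as many
 * rx_page_buf_step-sized buffers as will fit (just one buffer per page
 * when high-order pages are in use).
 */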
951648a23fSDaniel Pieczko void efx_rx_config_page_split(struct efx_nic *efx)
961648a23fSDaniel Pieczko {
972ec03014SAndrew Rybchenko 	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
98950c54dfSBen Hutchings 				      EFX_RX_BUF_ALIGNMENT);
991648a23fSDaniel Pieczko 	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
1001648a23fSDaniel Pieczko 		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
1011648a23fSDaniel Pieczko 		 efx->rx_page_buf_step);
1021648a23fSDaniel Pieczko 	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
1031648a23fSDaniel Pieczko 		efx->rx_bufs_per_page;
1041648a23fSDaniel Pieczko 	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
1051648a23fSDaniel Pieczko 					       efx->rx_bufs_per_page);
1061648a23fSDaniel Pieczko }
1071648a23fSDaniel Pieczko 
1082768935aSDaniel Pieczko /* Check the RX page recycle ring for a page that can be reused. */
1092768935aSDaniel Pieczko static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
1102768935aSDaniel Pieczko {
1112768935aSDaniel Pieczko 	struct efx_nic *efx = rx_queue->efx;
1122768935aSDaniel Pieczko 	struct page *page;
1132768935aSDaniel Pieczko 	struct efx_rx_page_state *state;
1142768935aSDaniel Pieczko 	unsigned index;
1152768935aSDaniel Pieczko 
1162768935aSDaniel Pieczko 	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
1172768935aSDaniel Pieczko 	page = rx_queue->page_ring[index];
1182768935aSDaniel Pieczko 	if (page == NULL)
1192768935aSDaniel Pieczko 		return NULL;
1202768935aSDaniel Pieczko 
1212768935aSDaniel Pieczko 	rx_queue->page_ring[index] = NULL;
1222768935aSDaniel Pieczko 	/* page_remove cannot exceed page_add. */
1232768935aSDaniel Pieczko 	if (rx_queue->page_remove != rx_queue->page_add)
1242768935aSDaniel Pieczko 		++rx_queue->page_remove;
1252768935aSDaniel Pieczko 
1262768935aSDaniel Pieczko 	/* If page_count is 1 then we hold the only reference to this page. */
1272768935aSDaniel Pieczko 	if (page_count(page) == 1) {
1282768935aSDaniel Pieczko 		++rx_queue->page_recycle_count;
1292768935aSDaniel Pieczko 		return page;
1302768935aSDaniel Pieczko 	} else {
1312768935aSDaniel Pieczko 		state = page_address(page);
1322768935aSDaniel Pieczko 		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
1332768935aSDaniel Pieczko 			       PAGE_SIZE << efx->rx_buffer_order,
1342768935aSDaniel Pieczko 			       DMA_FROM_DEVICE);
1352768935aSDaniel Pieczko 		put_page(page);
1362768935aSDaniel Pieczko 		++rx_queue->page_recycle_failed;
1372768935aSDaniel Pieczko 	}
1382768935aSDaniel Pieczko 
1392768935aSDaniel Pieczko 	return NULL;
1402768935aSDaniel Pieczko }
1412768935aSDaniel Pieczko 
142874aeea5SJeff Kirsher /**
14397d48a10SAlexandre Rames  * efx_init_rx_buffers - create EFX_RX_PREFERRED_BATCH page-based RX buffers
144874aeea5SJeff Kirsher  *
145874aeea5SJeff Kirsher  * @rx_queue:		Efx RX queue
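 * @atomic:		if true, allocate pages with GFP_ATOMIC rather than GFP_KERNEL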
146874aeea5SJeff Kirsher  *
1471648a23fSDaniel Pieczko  * This allocates a batch of pages, maps them for DMA, and populates
1481648a23fSDaniel Pieczko  * a struct efx_rx_buffer for each one. Return a negative error code or
1491648a23fSDaniel Pieczko  * 0 on success. If a single page can be used for multiple buffers,
1501648a23fSDaniel Pieczko  * then the page will either be inserted fully, or not at all.
151874aeea5SJeff Kirsher  */
152cce28794SJon Cooper static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
153874aeea5SJeff Kirsher {
154874aeea5SJeff Kirsher 	struct efx_nic *efx = rx_queue->efx;
155874aeea5SJeff Kirsher 	struct efx_rx_buffer *rx_buf;
156874aeea5SJeff Kirsher 	struct page *page;
157b590ace0SBen Hutchings 	unsigned int page_offset;
158874aeea5SJeff Kirsher 	struct efx_rx_page_state *state;
159874aeea5SJeff Kirsher 	dma_addr_t dma_addr;
160874aeea5SJeff Kirsher 	unsigned index, count;
161874aeea5SJeff Kirsher 
1621648a23fSDaniel Pieczko 	count = 0;
1631648a23fSDaniel Pieczko 	do {
1642768935aSDaniel Pieczko 		page = efx_reuse_page(rx_queue);
1652768935aSDaniel Pieczko 		if (page == NULL) {
166cce28794SJon Cooper 			page = alloc_pages(__GFP_COLD | __GFP_COMP |
167cce28794SJon Cooper 					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
168874aeea5SJeff Kirsher 					   efx->rx_buffer_order);
169874aeea5SJeff Kirsher 			if (unlikely(page == NULL))
170874aeea5SJeff Kirsher 				return -ENOMEM;
1712768935aSDaniel Pieczko 			dma_addr =
1722768935aSDaniel Pieczko 				dma_map_page(&efx->pci_dev->dev, page, 0,
173272baeebSBen Hutchings 					     PAGE_SIZE << efx->rx_buffer_order,
1740e33d870SBen Hutchings 					     DMA_FROM_DEVICE);
1752768935aSDaniel Pieczko 			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
1762768935aSDaniel Pieczko 						       dma_addr))) {
177874aeea5SJeff Kirsher 				__free_pages(page, efx->rx_buffer_order);
178874aeea5SJeff Kirsher 				return -EIO;
179874aeea5SJeff Kirsher 			}
180b8e02517SBen Hutchings 			state = page_address(page);
181874aeea5SJeff Kirsher 			state->dma_addr = dma_addr;
1822768935aSDaniel Pieczko 		} else {
1832768935aSDaniel Pieczko 			state = page_address(page);
1842768935aSDaniel Pieczko 			dma_addr = state->dma_addr;
1852768935aSDaniel Pieczko 		}
186874aeea5SJeff Kirsher 
187874aeea5SJeff Kirsher 		dma_addr += sizeof(struct efx_rx_page_state);
188b590ace0SBen Hutchings 		page_offset = sizeof(struct efx_rx_page_state);
189874aeea5SJeff Kirsher 
1901648a23fSDaniel Pieczko 		do {
191874aeea5SJeff Kirsher 			index = rx_queue->added_count & rx_queue->ptr_mask;
192874aeea5SJeff Kirsher 			rx_buf = efx_rx_buffer(rx_queue, index);
1932ec03014SAndrew Rybchenko 			rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
19497d48a10SAlexandre Rames 			rx_buf->page = page;
1952ec03014SAndrew Rybchenko 			rx_buf->page_offset = page_offset + efx->rx_ip_align;
196272baeebSBen Hutchings 			rx_buf->len = efx->rx_dma_len;
197179ea7f0SBen Hutchings 			rx_buf->flags = 0;
1981648a23fSDaniel Pieczko 			++rx_queue->added_count;
1991648a23fSDaniel Pieczko 			get_page(page);
2001648a23fSDaniel Pieczko 			dma_addr += efx->rx_page_buf_step;
2011648a23fSDaniel Pieczko 			page_offset += efx->rx_page_buf_step;
2021648a23fSDaniel Pieczko 		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);
203179ea7f0SBen Hutchings 
204179ea7f0SBen Hutchings 		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
2051648a23fSDaniel Pieczko 	} while (++count < efx->rx_pages_per_batch);
206874aeea5SJeff Kirsher 
207874aeea5SJeff Kirsher 	return 0;
208874aeea5SJeff Kirsher }
209874aeea5SJeff Kirsher 
2102768935aSDaniel Pieczko /* Unmap a DMA-mapped page.  This function is only called for the final RX
2112768935aSDaniel Pieczko  * buffer in a page.
2122768935aSDaniel Pieczko  */
213874aeea5SJeff Kirsher static void efx_unmap_rx_buffer(struct efx_nic *efx,
2142768935aSDaniel Pieczko 				struct efx_rx_buffer *rx_buf)
215874aeea5SJeff Kirsher {
2162768935aSDaniel Pieczko 	struct page *page = rx_buf->page;
217874aeea5SJeff Kirsher 
2182768935aSDaniel Pieczko 	if (page) {
2192768935aSDaniel Pieczko 		struct efx_rx_page_state *state = page_address(page);
2200e33d870SBen Hutchings 		dma_unmap_page(&efx->pci_dev->dev,
221874aeea5SJeff Kirsher 			       state->dma_addr,
222272baeebSBen Hutchings 			       PAGE_SIZE << efx->rx_buffer_order,
2230e33d870SBen Hutchings 			       DMA_FROM_DEVICE);
224874aeea5SJeff Kirsher 	}
225874aeea5SJeff Kirsher }
226874aeea5SJeff Kirsher 
2272768935aSDaniel Pieczko static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
228874aeea5SJeff Kirsher {
22997d48a10SAlexandre Rames 	if (rx_buf->page) {
2302768935aSDaniel Pieczko 		put_page(rx_buf->page);
23197d48a10SAlexandre Rames 		rx_buf->page = NULL;
232874aeea5SJeff Kirsher 	}
233874aeea5SJeff Kirsher }
234874aeea5SJeff Kirsher 
2352768935aSDaniel Pieczko /* Attempt to recycle the page if there is an RX recycle ring; the page can
2362768935aSDaniel Pieczko  * only be added if this is the final RX buffer, to prevent pages being used in
2372768935aSDaniel Pieczko  * the descriptor ring and appearing in the recycle ring simultaneously.
2382768935aSDaniel Pieczko  */
2392768935aSDaniel Pieczko static void efx_recycle_rx_page(struct efx_channel *channel,
2402768935aSDaniel Pieczko 				struct efx_rx_buffer *rx_buf)
2412768935aSDaniel Pieczko {
2422768935aSDaniel Pieczko 	struct page *page = rx_buf->page;
2432768935aSDaniel Pieczko 	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
2442768935aSDaniel Pieczko 	struct efx_nic *efx = rx_queue->efx;
2452768935aSDaniel Pieczko 	unsigned index;
2462768935aSDaniel Pieczko 
2472768935aSDaniel Pieczko 	/* Only recycle the page after processing the final buffer. */
248179ea7f0SBen Hutchings 	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
2492768935aSDaniel Pieczko 		return;
2502768935aSDaniel Pieczko 
2512768935aSDaniel Pieczko 	index = rx_queue->page_add & rx_queue->page_ptr_mask;
2522768935aSDaniel Pieczko 	if (rx_queue->page_ring[index] == NULL) {
2532768935aSDaniel Pieczko 		unsigned read_index = rx_queue->page_remove &
2542768935aSDaniel Pieczko 			rx_queue->page_ptr_mask;
2552768935aSDaniel Pieczko 
2562768935aSDaniel Pieczko 		/* The next slot in the recycle ring is available, but
2572768935aSDaniel Pieczko 		 * increment page_remove if the read pointer currently
2582768935aSDaniel Pieczko 		 * points here.
2592768935aSDaniel Pieczko 		 */
2602768935aSDaniel Pieczko 		if (read_index == index)
2612768935aSDaniel Pieczko 			++rx_queue->page_remove;
2622768935aSDaniel Pieczko 		rx_queue->page_ring[index] = page;
2632768935aSDaniel Pieczko 		++rx_queue->page_add;
2642768935aSDaniel Pieczko 		return;
2652768935aSDaniel Pieczko 	}
2662768935aSDaniel Pieczko 	++rx_queue->page_recycle_full;
2672768935aSDaniel Pieczko 	efx_unmap_rx_buffer(efx, rx_buf);
2682768935aSDaniel Pieczko 	put_page(rx_buf->page);
2692768935aSDaniel Pieczko }
2702768935aSDaniel Pieczko 
271874aeea5SJeff Kirsher static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
272874aeea5SJeff Kirsher 			       struct efx_rx_buffer *rx_buf)
273874aeea5SJeff Kirsher {
2742768935aSDaniel Pieczko 	/* Release the page reference we hold for the buffer. */
2752768935aSDaniel Pieczko 	if (rx_buf->page)
2762768935aSDaniel Pieczko 		put_page(rx_buf->page);
2772768935aSDaniel Pieczko 
2782768935aSDaniel Pieczko 	/* If this is the last buffer in a page, unmap and free it. */
279179ea7f0SBen Hutchings 	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
2802768935aSDaniel Pieczko 		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
2812768935aSDaniel Pieczko 		efx_free_rx_buffer(rx_buf);
2822768935aSDaniel Pieczko 	}
2832768935aSDaniel Pieczko 	rx_buf->page = NULL;
284874aeea5SJeff Kirsher }
285874aeea5SJeff Kirsher 
2862768935aSDaniel Pieczko /* Recycle the pages that are used by buffers that have just been received. */
287734d4e15SBen Hutchings static void efx_recycle_rx_pages(struct efx_channel *channel,
28885740cdfSBen Hutchings 				 struct efx_rx_buffer *rx_buf,
28985740cdfSBen Hutchings 				 unsigned int n_frags)
290874aeea5SJeff Kirsher {
291874aeea5SJeff Kirsher 	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
292874aeea5SJeff Kirsher 
29385740cdfSBen Hutchings 	do {
2942768935aSDaniel Pieczko 		efx_recycle_rx_page(channel, rx_buf);
29585740cdfSBen Hutchings 		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
29685740cdfSBen Hutchings 	} while (--n_frags);
297874aeea5SJeff Kirsher }
298874aeea5SJeff Kirsher 
299734d4e15SBen Hutchings static void efx_discard_rx_packet(struct efx_channel *channel,
300734d4e15SBen Hutchings 				  struct efx_rx_buffer *rx_buf,
301734d4e15SBen Hutchings 				  unsigned int n_frags)
302734d4e15SBen Hutchings {
303734d4e15SBen Hutchings 	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
304734d4e15SBen Hutchings 
305734d4e15SBen Hutchings 	efx_recycle_rx_pages(channel, rx_buf, n_frags);
306734d4e15SBen Hutchings 
307734d4e15SBen Hutchings 	do {
308734d4e15SBen Hutchings 		efx_free_rx_buffer(rx_buf);
309734d4e15SBen Hutchings 		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
310734d4e15SBen Hutchings 	} while (--n_frags);
311734d4e15SBen Hutchings }
312734d4e15SBen Hutchings 
313874aeea5SJeff Kirsher /**
314874aeea5SJeff Kirsher  * efx_fast_push_rx_descriptors - push new RX descriptors quickly
315874aeea5SJeff Kirsher  * @rx_queue:		RX descriptor queue
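 * @atomic:		if true, refill using atomic (GFP_ATOMIC) page allocations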
31649ce9c2cSBen Hutchings  *
317874aeea5SJeff Kirsher  * This will aim to fill the RX descriptor queue up to
318da9ca505SDavid Riddoch  * @rx_queue->max_fill. If there is insufficient atomic
319874aeea5SJeff Kirsher  * memory to do so, a slow fill will be scheduled.
320874aeea5SJeff Kirsher  *
321874aeea5SJeff Kirsher  * The caller must provide serialisation (none is used here). In practice,
322874aeea5SJeff Kirsher  * this means this function must run from the NAPI handler, or be called
323874aeea5SJeff Kirsher  * when NAPI is disabled.
324874aeea5SJeff Kirsher  */
325cce28794SJon Cooper void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, bool atomic)
326874aeea5SJeff Kirsher {
3271648a23fSDaniel Pieczko 	struct efx_nic *efx = rx_queue->efx;
3281648a23fSDaniel Pieczko 	unsigned int fill_level, batch_size;
329874aeea5SJeff Kirsher 	int space, rc = 0;
330874aeea5SJeff Kirsher 
331d8aec745SBen Hutchings 	if (!rx_queue->refill_enabled)
332d8aec745SBen Hutchings 		return;
333d8aec745SBen Hutchings 
334874aeea5SJeff Kirsher 	/* Calculate current fill level, and exit if we don't need to fill */
335874aeea5SJeff Kirsher 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
336874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(fill_level > rx_queue->efx->rxq_entries);
337874aeea5SJeff Kirsher 	if (fill_level >= rx_queue->fast_fill_trigger)
338874aeea5SJeff Kirsher 		goto out;
339874aeea5SJeff Kirsher 
340874aeea5SJeff Kirsher 	/* Record minimum fill level */
341874aeea5SJeff Kirsher 	if (unlikely(fill_level < rx_queue->min_fill)) {
342874aeea5SJeff Kirsher 		if (fill_level)
343874aeea5SJeff Kirsher 			rx_queue->min_fill = fill_level;
344874aeea5SJeff Kirsher 	}
345874aeea5SJeff Kirsher 
3461648a23fSDaniel Pieczko 	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
347da9ca505SDavid Riddoch 	space = rx_queue->max_fill - fill_level;
3481648a23fSDaniel Pieczko 	EFX_BUG_ON_PARANOID(space < batch_size);
349874aeea5SJeff Kirsher 
350874aeea5SJeff Kirsher 	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
351874aeea5SJeff Kirsher 		   "RX queue %d fast-filling descriptor ring from"
35297d48a10SAlexandre Rames 		   " level %d to level %d\n",
353874aeea5SJeff Kirsher 		   efx_rx_queue_index(rx_queue), fill_level,
35497d48a10SAlexandre Rames 		   rx_queue->max_fill);
35597d48a10SAlexandre Rames 
356874aeea5SJeff Kirsher 
357874aeea5SJeff Kirsher 	do {
358cce28794SJon Cooper 		rc = efx_init_rx_buffers(rx_queue, atomic);
359874aeea5SJeff Kirsher 		if (unlikely(rc)) {
360874aeea5SJeff Kirsher 			/* Ensure that we don't leave the rx queue empty */
361874aeea5SJeff Kirsher 			if (rx_queue->added_count == rx_queue->removed_count)
362874aeea5SJeff Kirsher 				efx_schedule_slow_fill(rx_queue);
363874aeea5SJeff Kirsher 			goto out;
364874aeea5SJeff Kirsher 		}
3651648a23fSDaniel Pieczko 	} while ((space -= batch_size) >= batch_size);
366874aeea5SJeff Kirsher 
367874aeea5SJeff Kirsher 	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
368874aeea5SJeff Kirsher 		   "RX queue %d fast-filled descriptor ring "
369874aeea5SJeff Kirsher 		   "to level %d\n", efx_rx_queue_index(rx_queue),
370874aeea5SJeff Kirsher 		   rx_queue->added_count - rx_queue->removed_count);
371874aeea5SJeff Kirsher 
372874aeea5SJeff Kirsher  out:
373874aeea5SJeff Kirsher 	if (rx_queue->notified_count != rx_queue->added_count)
374874aeea5SJeff Kirsher 		efx_nic_notify_rx_desc(rx_queue);
375874aeea5SJeff Kirsher }
376874aeea5SJeff Kirsher 
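/* Timer callback for the slow refill path: generate a fill event so that
 * the queue is refilled from NAPI context.
 */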
377874aeea5SJeff Kirsher void efx_rx_slow_fill(unsigned long context)
378874aeea5SJeff Kirsher {
379874aeea5SJeff Kirsher 	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
380874aeea5SJeff Kirsher 
381874aeea5SJeff Kirsher 	/* Post an event to cause NAPI to run and refill the queue */
3822ae75dacSBen Hutchings 	efx_nic_generate_fill_event(rx_queue);
383874aeea5SJeff Kirsher 	++rx_queue->slow_fill_count;
384874aeea5SJeff Kirsher }
385874aeea5SJeff Kirsher 
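/* Check that the hardware-reported packet length fits in the RX buffer.
 * Over-length packets are flagged for discard; if the DMA may have overrun
 * the buffer, an RX recovery reset is scheduled.
 */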
386874aeea5SJeff Kirsher static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
387874aeea5SJeff Kirsher 				     struct efx_rx_buffer *rx_buf,
38897d48a10SAlexandre Rames 				     int len)
389874aeea5SJeff Kirsher {
390874aeea5SJeff Kirsher 	struct efx_nic *efx = rx_queue->efx;
391874aeea5SJeff Kirsher 	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
392874aeea5SJeff Kirsher 
393874aeea5SJeff Kirsher 	if (likely(len <= max_len))
394874aeea5SJeff Kirsher 		return;
395874aeea5SJeff Kirsher 
396874aeea5SJeff Kirsher 	/* The packet must be discarded, but this is only a fatal error
397874aeea5SJeff Kirsher 	 * if the caller indicated it was
398874aeea5SJeff Kirsher 	 */
399db339569SBen Hutchings 	rx_buf->flags |= EFX_RX_PKT_DISCARD;
400874aeea5SJeff Kirsher 
401874aeea5SJeff Kirsher 	if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) {
402874aeea5SJeff Kirsher 		if (net_ratelimit())
403874aeea5SJeff Kirsher 			netif_err(efx, rx_err, efx->net_dev,
404874aeea5SJeff Kirsher 				  "RX queue %d seriously overlength "
405874aeea5SJeff Kirsher 				  "RX event (0x%x > 0x%x+0x%x). Leaking\n",
406874aeea5SJeff Kirsher 				  efx_rx_queue_index(rx_queue), len, max_len,
407874aeea5SJeff Kirsher 				  efx->type->rx_buffer_padding);
408874aeea5SJeff Kirsher 		efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY);
409874aeea5SJeff Kirsher 	} else {
410874aeea5SJeff Kirsher 		if (net_ratelimit())
411874aeea5SJeff Kirsher 			netif_err(efx, rx_err, efx->net_dev,
412874aeea5SJeff Kirsher 				  "RX queue %d overlength RX event "
413874aeea5SJeff Kirsher 				  "(0x%x > 0x%x)\n",
414874aeea5SJeff Kirsher 				  efx_rx_queue_index(rx_queue), len, max_len);
415874aeea5SJeff Kirsher 	}
416874aeea5SJeff Kirsher 
417874aeea5SJeff Kirsher 	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
418874aeea5SJeff Kirsher }
419874aeea5SJeff Kirsher 
42061321d92SBen Hutchings /* Pass a received packet up through GRO.  GRO can handle page fragments
42161321d92SBen Hutchings  * regardless of checksum state, and skbs only if they have a good checksum.
422874aeea5SJeff Kirsher  */
42385740cdfSBen Hutchings static void
42485740cdfSBen Hutchings efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
42585740cdfSBen Hutchings 		  unsigned int n_frags, u8 *eh)
426874aeea5SJeff Kirsher {
427874aeea5SJeff Kirsher 	struct napi_struct *napi = &channel->napi_str;
428874aeea5SJeff Kirsher 	gro_result_t gro_result;
429874aeea5SJeff Kirsher 	struct efx_nic *efx = channel->efx;
430874aeea5SJeff Kirsher 	struct sk_buff *skb;
431874aeea5SJeff Kirsher 
432874aeea5SJeff Kirsher 	skb = napi_get_frags(napi);
43385740cdfSBen Hutchings 	if (unlikely(!skb)) {
43485740cdfSBen Hutchings 		while (n_frags--) {
43585740cdfSBen Hutchings 			put_page(rx_buf->page);
43685740cdfSBen Hutchings 			rx_buf->page = NULL;
43785740cdfSBen Hutchings 			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
43885740cdfSBen Hutchings 		}
439874aeea5SJeff Kirsher 		return;
440874aeea5SJeff Kirsher 	}
441874aeea5SJeff Kirsher 
442874aeea5SJeff Kirsher 	if (efx->net_dev->features & NETIF_F_RXHASH)
443c7cb38afSTom Herbert 		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
444c7cb38afSTom Herbert 			     PKT_HASH_TYPE_L3);
445db339569SBen Hutchings 	skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
446db339569SBen Hutchings 			  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
447874aeea5SJeff Kirsher 
44885740cdfSBen Hutchings 	for (;;) {
44985740cdfSBen Hutchings 		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
45085740cdfSBen Hutchings 				   rx_buf->page, rx_buf->page_offset,
45185740cdfSBen Hutchings 				   rx_buf->len);
45285740cdfSBen Hutchings 		rx_buf->page = NULL;
45385740cdfSBen Hutchings 		skb->len += rx_buf->len;
45485740cdfSBen Hutchings 		if (skb_shinfo(skb)->nr_frags == n_frags)
45585740cdfSBen Hutchings 			break;
45685740cdfSBen Hutchings 
45785740cdfSBen Hutchings 		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
45885740cdfSBen Hutchings 	}
45985740cdfSBen Hutchings 
46085740cdfSBen Hutchings 	skb->data_len = skb->len;
46185740cdfSBen Hutchings 	skb->truesize += n_frags * efx->rx_buffer_truesize;
46285740cdfSBen Hutchings 
46379d68b37SStuart Hodgson 	skb_record_rx_queue(skb, channel->rx_queue.core_index);
464874aeea5SJeff Kirsher 
465*36763266SAlexandre Rames 	skb_mark_napi_id(skb, &channel->napi_str);
466874aeea5SJeff Kirsher 	gro_result = napi_gro_frags(napi);
46797d48a10SAlexandre Rames 	if (gro_result != GRO_DROP)
468874aeea5SJeff Kirsher 		channel->irq_mod_score += 2;
469874aeea5SJeff Kirsher }
47097d48a10SAlexandre Rames 
47185740cdfSBen Hutchings /* Allocate and construct an SKB around page fragments */
47297d48a10SAlexandre Rames static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
47397d48a10SAlexandre Rames 				     struct efx_rx_buffer *rx_buf,
47485740cdfSBen Hutchings 				     unsigned int n_frags,
47597d48a10SAlexandre Rames 				     u8 *eh, int hdr_len)
47697d48a10SAlexandre Rames {
47797d48a10SAlexandre Rames 	struct efx_nic *efx = channel->efx;
47897d48a10SAlexandre Rames 	struct sk_buff *skb;
47997d48a10SAlexandre Rames 
48097d48a10SAlexandre Rames 	/* Allocate an SKB to store the headers */
4812ccd0b19SBen Hutchings 	skb = netdev_alloc_skb(efx->net_dev,
4822ccd0b19SBen Hutchings 			       efx->rx_ip_align + efx->rx_prefix_size +
4832ccd0b19SBen Hutchings 			       hdr_len);
484e4d112e4SEdward Cree 	if (unlikely(skb == NULL)) {
485e4d112e4SEdward Cree 		atomic_inc(&efx->n_rx_noskb_drops);
48697d48a10SAlexandre Rames 		return NULL;
487e4d112e4SEdward Cree 	}
48897d48a10SAlexandre Rames 
48997d48a10SAlexandre Rames 	EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
49097d48a10SAlexandre Rames 
4912ccd0b19SBen Hutchings 	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
4922ccd0b19SBen Hutchings 	       efx->rx_prefix_size + hdr_len);
4932ccd0b19SBen Hutchings 	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
4942ccd0b19SBen Hutchings 	__skb_put(skb, hdr_len);
49597d48a10SAlexandre Rames 
49685740cdfSBen Hutchings 	/* Append the remaining page(s) onto the frag list */
49797d48a10SAlexandre Rames 	if (rx_buf->len > hdr_len) {
49885740cdfSBen Hutchings 		rx_buf->page_offset += hdr_len;
49985740cdfSBen Hutchings 		rx_buf->len -= hdr_len;
50085740cdfSBen Hutchings 
50185740cdfSBen Hutchings 		for (;;) {
50285740cdfSBen Hutchings 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
50385740cdfSBen Hutchings 					   rx_buf->page, rx_buf->page_offset,
50485740cdfSBen Hutchings 					   rx_buf->len);
50585740cdfSBen Hutchings 			rx_buf->page = NULL;
50685740cdfSBen Hutchings 			skb->len += rx_buf->len;
50785740cdfSBen Hutchings 			skb->data_len += rx_buf->len;
50885740cdfSBen Hutchings 			if (skb_shinfo(skb)->nr_frags == n_frags)
50985740cdfSBen Hutchings 				break;
51085740cdfSBen Hutchings 
51185740cdfSBen Hutchings 			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
51285740cdfSBen Hutchings 		}
51397d48a10SAlexandre Rames 	} else {
51497d48a10SAlexandre Rames 		__free_pages(rx_buf->page, efx->rx_buffer_order);
51585740cdfSBen Hutchings 		rx_buf->page = NULL;
51685740cdfSBen Hutchings 		n_frags = 0;
51797d48a10SAlexandre Rames 	}
51897d48a10SAlexandre Rames 
51985740cdfSBen Hutchings 	skb->truesize += n_frags * efx->rx_buffer_truesize;
52097d48a10SAlexandre Rames 
52197d48a10SAlexandre Rames 	/* Move past the ethernet header */
52297d48a10SAlexandre Rames 	skb->protocol = eth_type_trans(skb, efx->net_dev);
52397d48a10SAlexandre Rames 
524*36763266SAlexandre Rames 	skb_mark_napi_id(skb, &channel->napi_str);
525*36763266SAlexandre Rames 
52697d48a10SAlexandre Rames 	return skb;
527874aeea5SJeff Kirsher }
528874aeea5SJeff Kirsher 
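/* Handle a received packet.  First half: validate the completion, sync the
 * DMA mappings and recycle the pages, then record the packet in the channel
 * so that the payload can be processed later by __efx_rx_packet().
 */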
529874aeea5SJeff Kirsher void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
53085740cdfSBen Hutchings 		   unsigned int n_frags, unsigned int len, u16 flags)
531874aeea5SJeff Kirsher {
532874aeea5SJeff Kirsher 	struct efx_nic *efx = rx_queue->efx;
533874aeea5SJeff Kirsher 	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
534874aeea5SJeff Kirsher 	struct efx_rx_buffer *rx_buf;
535874aeea5SJeff Kirsher 
5368ccf3800SAndrew Rybchenko 	rx_queue->rx_packets++;
5378ccf3800SAndrew Rybchenko 
538874aeea5SJeff Kirsher 	rx_buf = efx_rx_buffer(rx_queue, index);
539179ea7f0SBen Hutchings 	rx_buf->flags |= flags;
540874aeea5SJeff Kirsher 
54185740cdfSBen Hutchings 	/* Validate the number of fragments and completed length */
54285740cdfSBen Hutchings 	if (n_frags == 1) {
5433dced740SBen Hutchings 		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
54497d48a10SAlexandre Rames 			efx_rx_packet__check_len(rx_queue, rx_buf, len);
54585740cdfSBen Hutchings 	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
546e8c68c0aSJon Cooper 		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
547e8c68c0aSJon Cooper 		   unlikely(len > n_frags * efx->rx_dma_len) ||
54885740cdfSBen Hutchings 		   unlikely(!efx->rx_scatter)) {
54985740cdfSBen Hutchings 		/* If this isn't an explicit discard request, either
55085740cdfSBen Hutchings 		 * the hardware or the driver is broken.
55185740cdfSBen Hutchings 		 */
55285740cdfSBen Hutchings 		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
55385740cdfSBen Hutchings 		rx_buf->flags |= EFX_RX_PKT_DISCARD;
55485740cdfSBen Hutchings 	}
555874aeea5SJeff Kirsher 
556874aeea5SJeff Kirsher 	netif_vdbg(efx, rx_status, efx->net_dev,
55785740cdfSBen Hutchings 		   "RX queue %d received ids %x-%x len %d %s%s\n",
558874aeea5SJeff Kirsher 		   efx_rx_queue_index(rx_queue), index,
55985740cdfSBen Hutchings 		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
560db339569SBen Hutchings 		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
561db339569SBen Hutchings 		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");
562874aeea5SJeff Kirsher 
56385740cdfSBen Hutchings 	/* Discard packet, if instructed to do so.  Process the
56485740cdfSBen Hutchings 	 * previous receive first.
56585740cdfSBen Hutchings 	 */
566db339569SBen Hutchings 	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
56785740cdfSBen Hutchings 		efx_rx_flush_packet(channel);
568734d4e15SBen Hutchings 		efx_discard_rx_packet(channel, rx_buf, n_frags);
56985740cdfSBen Hutchings 		return;
570874aeea5SJeff Kirsher 	}
571874aeea5SJeff Kirsher 
5723dced740SBen Hutchings 	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
57385740cdfSBen Hutchings 		rx_buf->len = len;
57485740cdfSBen Hutchings 
5752768935aSDaniel Pieczko 	/* Release and/or sync the DMA mapping - assumes all RX buffers
5762768935aSDaniel Pieczko 	 * consumed in-order per RX queue.
577874aeea5SJeff Kirsher 	 */
5782768935aSDaniel Pieczko 	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
579874aeea5SJeff Kirsher 
580874aeea5SJeff Kirsher 	/* Prefetch nice and early so data will (hopefully) be in cache by
581874aeea5SJeff Kirsher 	 * the time we look at it.
582874aeea5SJeff Kirsher 	 */
5835036b7c7SBen Hutchings 	prefetch(efx_rx_buf_va(rx_buf));
584874aeea5SJeff Kirsher 
58543a3739dSJon Cooper 	rx_buf->page_offset += efx->rx_prefix_size;
58643a3739dSJon Cooper 	rx_buf->len -= efx->rx_prefix_size;
58785740cdfSBen Hutchings 
58885740cdfSBen Hutchings 	if (n_frags > 1) {
58985740cdfSBen Hutchings 		/* Release/sync DMA mapping for additional fragments.
59085740cdfSBen Hutchings 		 * Fix length for last fragment.
59185740cdfSBen Hutchings 		 */
59285740cdfSBen Hutchings 		unsigned int tail_frags = n_frags - 1;
59385740cdfSBen Hutchings 
59485740cdfSBen Hutchings 		for (;;) {
59585740cdfSBen Hutchings 			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
59685740cdfSBen Hutchings 			if (--tail_frags == 0)
59785740cdfSBen Hutchings 				break;
598e8c68c0aSJon Cooper 			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
59985740cdfSBen Hutchings 		}
600e8c68c0aSJon Cooper 		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
6012768935aSDaniel Pieczko 		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
60285740cdfSBen Hutchings 	}
603b74e3e8cSBen Hutchings 
604734d4e15SBen Hutchings 	/* All fragments have been DMA-synced, so recycle pages. */
6052768935aSDaniel Pieczko 	rx_buf = efx_rx_buffer(rx_queue, index);
606734d4e15SBen Hutchings 	efx_recycle_rx_pages(channel, rx_buf, n_frags);
6072768935aSDaniel Pieczko 
608874aeea5SJeff Kirsher 	/* Pipeline receives so that we give time for packet headers to be
609874aeea5SJeff Kirsher 	 * prefetched into cache.
610874aeea5SJeff Kirsher 	 */
611ff734ef4SBen Hutchings 	efx_rx_flush_packet(channel);
61285740cdfSBen Hutchings 	channel->rx_pkt_n_frags = n_frags;
61385740cdfSBen Hutchings 	channel->rx_pkt_index = index;
614874aeea5SJeff Kirsher }
615874aeea5SJeff Kirsher 
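/* Construct an skb for a received packet and pass it up to the kernel
 * stack, or to the channel type's own receive_skb handler if it has one.
 */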
61697d48a10SAlexandre Rames static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
61785740cdfSBen Hutchings 			   struct efx_rx_buffer *rx_buf,
61885740cdfSBen Hutchings 			   unsigned int n_frags)
6191ddceb4cSBen Hutchings {
6201ddceb4cSBen Hutchings 	struct sk_buff *skb;
62197d48a10SAlexandre Rames 	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);
6221ddceb4cSBen Hutchings 
62385740cdfSBen Hutchings 	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
62497d48a10SAlexandre Rames 	if (unlikely(skb == NULL)) {
6252768935aSDaniel Pieczko 		efx_free_rx_buffer(rx_buf);
62697d48a10SAlexandre Rames 		return;
62797d48a10SAlexandre Rames 	}
62897d48a10SAlexandre Rames 	skb_record_rx_queue(skb, channel->rx_queue.core_index);
6291ddceb4cSBen Hutchings 
6301ddceb4cSBen Hutchings 	/* Set the SKB flags */
6311ddceb4cSBen Hutchings 	skb_checksum_none_assert(skb);
632c99dffc4SJon Cooper 	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED))
633c99dffc4SJon Cooper 		skb->ip_summed = CHECKSUM_UNNECESSARY;
6341ddceb4cSBen Hutchings 
635bd9a265dSJon Cooper 	efx_rx_skb_attach_timestamp(channel, skb);
636bd9a265dSJon Cooper 
637c31e5f9fSStuart Hodgson 	if (channel->type->receive_skb)
6384a74dc65SBen Hutchings 		if (channel->type->receive_skb(channel, skb))
63997d48a10SAlexandre Rames 			return;
6404a74dc65SBen Hutchings 
6414a74dc65SBen Hutchings 	/* Pass the packet up */
6421ddceb4cSBen Hutchings 	netif_receive_skb(skb);
6431ddceb4cSBen Hutchings }
6441ddceb4cSBen Hutchings 
645874aeea5SJeff Kirsher /* Handle a received packet.  Second half: Touches packet payload. */
64685740cdfSBen Hutchings void __efx_rx_packet(struct efx_channel *channel)
647874aeea5SJeff Kirsher {
648874aeea5SJeff Kirsher 	struct efx_nic *efx = channel->efx;
64985740cdfSBen Hutchings 	struct efx_rx_buffer *rx_buf =
65085740cdfSBen Hutchings 		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
651b74e3e8cSBen Hutchings 	u8 *eh = efx_rx_buf_va(rx_buf);
652874aeea5SJeff Kirsher 
6533dced740SBen Hutchings 	/* Read length from the prefix if necessary.  This already
6543dced740SBen Hutchings 	 * excludes the length of the prefix itself.
6553dced740SBen Hutchings 	 */
6563dced740SBen Hutchings 	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
6573dced740SBen Hutchings 		rx_buf->len = le16_to_cpup((__le16 *)
6583dced740SBen Hutchings 					   (eh + efx->rx_packet_len_offset));
6593dced740SBen Hutchings 
660874aeea5SJeff Kirsher 	/* If we're in loopback test, then pass the packet directly to the
661874aeea5SJeff Kirsher 	 * loopback layer, and free the rx_buf here
662874aeea5SJeff Kirsher 	 */
663874aeea5SJeff Kirsher 	if (unlikely(efx->loopback_selftest)) {
664874aeea5SJeff Kirsher 		efx_loopback_rx_packet(efx, eh, rx_buf->len);
6652768935aSDaniel Pieczko 		efx_free_rx_buffer(rx_buf);
66685740cdfSBen Hutchings 		goto out;
667874aeea5SJeff Kirsher 	}
668874aeea5SJeff Kirsher 
669874aeea5SJeff Kirsher 	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
670db339569SBen Hutchings 		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
671874aeea5SJeff Kirsher 
672*36763266SAlexandre Rames 	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb &&
673*36763266SAlexandre Rames 	    !efx_channel_busy_polling(channel))
67485740cdfSBen Hutchings 		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
6751ddceb4cSBen Hutchings 	else
67685740cdfSBen Hutchings 		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
67785740cdfSBen Hutchings out:
67885740cdfSBen Hutchings 	channel->rx_pkt_n_frags = 0;
679874aeea5SJeff Kirsher }
680874aeea5SJeff Kirsher 
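/* Allocate the software RX buffer table and the hardware descriptor ring
 * for an RX queue.
 */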
681874aeea5SJeff Kirsher int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
682874aeea5SJeff Kirsher {
683874aeea5SJeff Kirsher 	struct efx_nic *efx = rx_queue->efx;
684874aeea5SJeff Kirsher 	unsigned int entries;
685874aeea5SJeff Kirsher 	int rc;
686874aeea5SJeff Kirsher 
687874aeea5SJeff Kirsher 	/* Create the smallest power-of-two aligned ring */
688874aeea5SJeff Kirsher 	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
689874aeea5SJeff Kirsher 	EFX_BUG_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
690874aeea5SJeff Kirsher 	rx_queue->ptr_mask = entries - 1;
691874aeea5SJeff Kirsher 
692874aeea5SJeff Kirsher 	netif_dbg(efx, probe, efx->net_dev,
693874aeea5SJeff Kirsher 		  "creating RX queue %d size %#x mask %#x\n",
694874aeea5SJeff Kirsher 		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
695874aeea5SJeff Kirsher 		  rx_queue->ptr_mask);
696874aeea5SJeff Kirsher 
697874aeea5SJeff Kirsher 	/* Allocate RX buffers */
698c2e4e25aSThomas Meyer 	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
699874aeea5SJeff Kirsher 				   GFP_KERNEL);
700874aeea5SJeff Kirsher 	if (!rx_queue->buffer)
701874aeea5SJeff Kirsher 		return -ENOMEM;
702874aeea5SJeff Kirsher 
703874aeea5SJeff Kirsher 	rc = efx_nic_probe_rx(rx_queue);
704874aeea5SJeff Kirsher 	if (rc) {
705874aeea5SJeff Kirsher 		kfree(rx_queue->buffer);
706874aeea5SJeff Kirsher 		rx_queue->buffer = NULL;
707874aeea5SJeff Kirsher 	}
7082768935aSDaniel Pieczko 
709874aeea5SJeff Kirsher 	return rc;
710874aeea5SJeff Kirsher }
711874aeea5SJeff Kirsher 
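/* Allocate the page recycle ring for an RX queue, sized according to
 * whether an IOMMU is present.
 */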
712debd0034Sstephen hemminger static void efx_init_rx_recycle_ring(struct efx_nic *efx,
7132768935aSDaniel Pieczko 				     struct efx_rx_queue *rx_queue)
7142768935aSDaniel Pieczko {
7152768935aSDaniel Pieczko 	unsigned int bufs_in_recycle_ring, page_ring_size;
7162768935aSDaniel Pieczko 
7172768935aSDaniel Pieczko 	/* Set the RX recycle ring size */
7182768935aSDaniel Pieczko #ifdef CONFIG_PPC64
7192768935aSDaniel Pieczko 	bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
7202768935aSDaniel Pieczko #else
721636d73daSBen Hutchings 	if (iommu_present(&pci_bus_type))
7222768935aSDaniel Pieczko 		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_IOMMU;
7232768935aSDaniel Pieczko 	else
7242768935aSDaniel Pieczko 		bufs_in_recycle_ring = EFX_RECYCLE_RING_SIZE_NOIOMMU;
7252768935aSDaniel Pieczko #endif /* CONFIG_PPC64 */
7262768935aSDaniel Pieczko 
7272768935aSDaniel Pieczko 	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
7282768935aSDaniel Pieczko 					    efx->rx_bufs_per_page);
7292768935aSDaniel Pieczko 	rx_queue->page_ring = kcalloc(page_ring_size,
7302768935aSDaniel Pieczko 				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
7312768935aSDaniel Pieczko 	rx_queue->page_ptr_mask = page_ring_size - 1;
7322768935aSDaniel Pieczko }
7332768935aSDaniel Pieczko 
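/* Reset an RX queue's counters, set up its page recycle ring and refill
 * thresholds, and initialise the hardware descriptor ring.
 */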
734874aeea5SJeff Kirsher void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
735874aeea5SJeff Kirsher {
736874aeea5SJeff Kirsher 	struct efx_nic *efx = rx_queue->efx;
73764235187SDavid Riddoch 	unsigned int max_fill, trigger, max_trigger;
738874aeea5SJeff Kirsher 
739874aeea5SJeff Kirsher 	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
740874aeea5SJeff Kirsher 		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));
741874aeea5SJeff Kirsher 
742874aeea5SJeff Kirsher 	/* Initialise ptr fields */
743874aeea5SJeff Kirsher 	rx_queue->added_count = 0;
744874aeea5SJeff Kirsher 	rx_queue->notified_count = 0;
745874aeea5SJeff Kirsher 	rx_queue->removed_count = 0;
746874aeea5SJeff Kirsher 	rx_queue->min_fill = -1U;
7472768935aSDaniel Pieczko 	efx_init_rx_recycle_ring(efx, rx_queue);
7482768935aSDaniel Pieczko 
7492768935aSDaniel Pieczko 	rx_queue->page_remove = 0;
7502768935aSDaniel Pieczko 	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
7512768935aSDaniel Pieczko 	rx_queue->page_recycle_count = 0;
7522768935aSDaniel Pieczko 	rx_queue->page_recycle_failed = 0;
7532768935aSDaniel Pieczko 	rx_queue->page_recycle_full = 0;
754874aeea5SJeff Kirsher 
755874aeea5SJeff Kirsher 	/* Initialise limit fields */
756874aeea5SJeff Kirsher 	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
7571648a23fSDaniel Pieczko 	max_trigger =
7581648a23fSDaniel Pieczko 		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
75964235187SDavid Riddoch 	if (rx_refill_threshold != 0) {
760874aeea5SJeff Kirsher 		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
76164235187SDavid Riddoch 		if (trigger > max_trigger)
76264235187SDavid Riddoch 			trigger = max_trigger;
76364235187SDavid Riddoch 	} else {
76464235187SDavid Riddoch 		trigger = max_trigger;
76564235187SDavid Riddoch 	}
766874aeea5SJeff Kirsher 
767874aeea5SJeff Kirsher 	rx_queue->max_fill = max_fill;
768874aeea5SJeff Kirsher 	rx_queue->fast_fill_trigger = trigger;
769d8aec745SBen Hutchings 	rx_queue->refill_enabled = true;
770874aeea5SJeff Kirsher 
771874aeea5SJeff Kirsher 	/* Set up RX descriptor ring */
772874aeea5SJeff Kirsher 	efx_nic_init_rx(rx_queue);
773874aeea5SJeff Kirsher }
774874aeea5SJeff Kirsher 
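/* Tear down an RX queue: release any outstanding RX buffers, then unmap
 * and free the pages held in the recycle ring.
 */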
775874aeea5SJeff Kirsher void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
776874aeea5SJeff Kirsher {
777874aeea5SJeff Kirsher 	int i;
7782768935aSDaniel Pieczko 	struct efx_nic *efx = rx_queue->efx;
779874aeea5SJeff Kirsher 	struct efx_rx_buffer *rx_buf;
780874aeea5SJeff Kirsher 
781874aeea5SJeff Kirsher 	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
782874aeea5SJeff Kirsher 		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));
783874aeea5SJeff Kirsher 
784874aeea5SJeff Kirsher 	del_timer_sync(&rx_queue->slow_fill);
785874aeea5SJeff Kirsher 
7862768935aSDaniel Pieczko 	/* Release RX buffers from the current read ptr to the write ptr */
787874aeea5SJeff Kirsher 	if (rx_queue->buffer) {
7882768935aSDaniel Pieczko 		for (i = rx_queue->removed_count; i < rx_queue->added_count;
7892768935aSDaniel Pieczko 		     i++) {
7902768935aSDaniel Pieczko 			unsigned index = i & rx_queue->ptr_mask;
7912768935aSDaniel Pieczko 			rx_buf = efx_rx_buffer(rx_queue, index);
792874aeea5SJeff Kirsher 			efx_fini_rx_buffer(rx_queue, rx_buf);
793874aeea5SJeff Kirsher 		}
794874aeea5SJeff Kirsher 	}
7952768935aSDaniel Pieczko 
7962768935aSDaniel Pieczko 	/* Unmap and release the pages in the recycle ring. Remove the ring. */
7972768935aSDaniel Pieczko 	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
7982768935aSDaniel Pieczko 		struct page *page = rx_queue->page_ring[i];
7992768935aSDaniel Pieczko 		struct efx_rx_page_state *state;
8002768935aSDaniel Pieczko 
8012768935aSDaniel Pieczko 		if (page == NULL)
8022768935aSDaniel Pieczko 			continue;
8032768935aSDaniel Pieczko 
8042768935aSDaniel Pieczko 		state = page_address(page);
8052768935aSDaniel Pieczko 		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
8062768935aSDaniel Pieczko 			       PAGE_SIZE << efx->rx_buffer_order,
8072768935aSDaniel Pieczko 			       DMA_FROM_DEVICE);
8082768935aSDaniel Pieczko 		put_page(page);
8092768935aSDaniel Pieczko 	}
8102768935aSDaniel Pieczko 	kfree(rx_queue->page_ring);
8112768935aSDaniel Pieczko 	rx_queue->page_ring = NULL;
812874aeea5SJeff Kirsher }
813874aeea5SJeff Kirsher 
814874aeea5SJeff Kirsher void efx_remove_rx_queue(struct efx_rx_queue *rx_queue)
815874aeea5SJeff Kirsher {
816874aeea5SJeff Kirsher 	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
817874aeea5SJeff Kirsher 		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));
818874aeea5SJeff Kirsher 
819874aeea5SJeff Kirsher 	efx_nic_remove_rx(rx_queue);
820874aeea5SJeff Kirsher 
821874aeea5SJeff Kirsher 	kfree(rx_queue->buffer);
822874aeea5SJeff Kirsher 	rx_queue->buffer = NULL;
823874aeea5SJeff Kirsher }
824874aeea5SJeff Kirsher 
825874aeea5SJeff Kirsher 
826874aeea5SJeff Kirsher module_param(rx_refill_threshold, uint, 0444);
827874aeea5SJeff Kirsher MODULE_PARM_DESC(rx_refill_threshold,
82864235187SDavid Riddoch 		 "RX descriptor ring refill threshold (%)");
829874aeea5SJeff Kirsher 
830add72477SBen Hutchings #ifdef CONFIG_RFS_ACCEL
831add72477SBen Hutchings 
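/* Accelerated RFS: build a 5-tuple filter specification from the packet
 * headers and ask the NIC to steer this flow to the RX queue chosen by
 * the stack.
 */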
832add72477SBen Hutchings int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
833add72477SBen Hutchings 		   u16 rxq_index, u32 flow_id)
834add72477SBen Hutchings {
835add72477SBen Hutchings 	struct efx_nic *efx = netdev_priv(net_dev);
836add72477SBen Hutchings 	struct efx_channel *channel;
837add72477SBen Hutchings 	struct efx_filter_spec spec;
838add72477SBen Hutchings 	const __be16 *ports;
839c47b2d9dSBen Hutchings 	__be16 ether_type;
840add72477SBen Hutchings 	int nhoff;
841add72477SBen Hutchings 	int rc;
842add72477SBen Hutchings 
843c47b2d9dSBen Hutchings 	/* The core RPS/RFS code has already parsed and validated
844c47b2d9dSBen Hutchings 	 * VLAN, IP and transport headers.  We assume they are in the
845c47b2d9dSBen Hutchings 	 * header area.
846c47b2d9dSBen Hutchings 	 */
847add72477SBen Hutchings 
848add72477SBen Hutchings 	if (skb->protocol == htons(ETH_P_8021Q)) {
849c47b2d9dSBen Hutchings 		const struct vlan_hdr *vh =
850c47b2d9dSBen Hutchings 			(const struct vlan_hdr *)skb->data;
851add72477SBen Hutchings 
852c47b2d9dSBen Hutchings 		/* We can't filter on the IP 5-tuple and the vlan
853c47b2d9dSBen Hutchings 		 * together, so just strip the vlan header and filter
854c47b2d9dSBen Hutchings 		 * on the IP part.
855add72477SBen Hutchings 		 */
856c47b2d9dSBen Hutchings 		EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
857c47b2d9dSBen Hutchings 		ether_type = vh->h_vlan_encapsulated_proto;
858c47b2d9dSBen Hutchings 		nhoff = sizeof(struct vlan_hdr);
859c47b2d9dSBen Hutchings 	} else {
860c47b2d9dSBen Hutchings 		ether_type = skb->protocol;
861c47b2d9dSBen Hutchings 		nhoff = 0;
862add72477SBen Hutchings 	}
863add72477SBen Hutchings 
864c47b2d9dSBen Hutchings 	if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
865add72477SBen Hutchings 		return -EPROTONOSUPPORT;
866add72477SBen Hutchings 
867add72477SBen Hutchings 	efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
868add72477SBen Hutchings 			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
869add72477SBen Hutchings 			   rxq_index);
870c47b2d9dSBen Hutchings 	spec.match_flags =
871c47b2d9dSBen Hutchings 		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
872c47b2d9dSBen Hutchings 		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
873c47b2d9dSBen Hutchings 		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
874c47b2d9dSBen Hutchings 	spec.ether_type = ether_type;
875c47b2d9dSBen Hutchings 
876c47b2d9dSBen Hutchings 	if (ether_type == htons(ETH_P_IP)) {
877c47b2d9dSBen Hutchings 		const struct iphdr *ip =
878c47b2d9dSBen Hutchings 			(const struct iphdr *)(skb->data + nhoff);
879c47b2d9dSBen Hutchings 
880c47b2d9dSBen Hutchings 		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
881c47b2d9dSBen Hutchings 		if (ip_is_fragment(ip))
882c47b2d9dSBen Hutchings 			return -EPROTONOSUPPORT;
883c47b2d9dSBen Hutchings 		spec.ip_proto = ip->protocol;
884c47b2d9dSBen Hutchings 		spec.rem_host[0] = ip->saddr;
885c47b2d9dSBen Hutchings 		spec.loc_host[0] = ip->daddr;
886c47b2d9dSBen Hutchings 		EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
887c47b2d9dSBen Hutchings 		ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
888c47b2d9dSBen Hutchings 	} else {
889c47b2d9dSBen Hutchings 		const struct ipv6hdr *ip6 =
890c47b2d9dSBen Hutchings 			(const struct ipv6hdr *)(skb->data + nhoff);
891c47b2d9dSBen Hutchings 
892c47b2d9dSBen Hutchings 		EFX_BUG_ON_PARANOID(skb_headlen(skb) <
893c47b2d9dSBen Hutchings 				    nhoff + sizeof(*ip6) + 4);
894c47b2d9dSBen Hutchings 		spec.ip_proto = ip6->nexthdr;
895c47b2d9dSBen Hutchings 		memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
896c47b2d9dSBen Hutchings 		memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
897c47b2d9dSBen Hutchings 		ports = (const __be16 *)(ip6 + 1);
898c47b2d9dSBen Hutchings 	}
899c47b2d9dSBen Hutchings 
900c47b2d9dSBen Hutchings 	spec.rem_port = ports[0];
901c47b2d9dSBen Hutchings 	spec.loc_port = ports[1];
902add72477SBen Hutchings 
903add72477SBen Hutchings 	rc = efx->type->filter_rfs_insert(efx, &spec);
904add72477SBen Hutchings 	if (rc < 0)
905add72477SBen Hutchings 		return rc;
906add72477SBen Hutchings 
907add72477SBen Hutchings 	/* Remember this so we can check whether to expire the filter later */
908add72477SBen Hutchings 	efx->rps_flow_id[rc] = flow_id;
909add72477SBen Hutchings 	channel = efx_get_channel(efx, skb_get_rx_queue(skb));
910add72477SBen Hutchings 	++channel->rfs_filters_added;
911add72477SBen Hutchings 
912c47b2d9dSBen Hutchings 	if (ether_type == htons(ETH_P_IP))
913add72477SBen Hutchings 		netif_info(efx, rx_status, efx->net_dev,
914add72477SBen Hutchings 			   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
915c47b2d9dSBen Hutchings 			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
916c47b2d9dSBen Hutchings 			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
917c47b2d9dSBen Hutchings 			   ntohs(ports[1]), rxq_index, flow_id, rc);
918c47b2d9dSBen Hutchings 	else
919c47b2d9dSBen Hutchings 		netif_info(efx, rx_status, efx->net_dev,
920c47b2d9dSBen Hutchings 			   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
921c47b2d9dSBen Hutchings 			   (spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
922c47b2d9dSBen Hutchings 			   spec.rem_host, ntohs(ports[0]), spec.loc_host,
923c47b2d9dSBen Hutchings 			   ntohs(ports[1]), rxq_index, flow_id, rc);
924add72477SBen Hutchings 
925add72477SBen Hutchings 	return rc;
926add72477SBen Hutchings }
927add72477SBen Hutchings 
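/* Scan up to @quota entries of the RFS flow table, giving the NIC-type
 * code a chance to expire filters for flows that are no longer active.
 * Returns false if the filter lock was contended.
 */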
928add72477SBen Hutchings bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
929add72477SBen Hutchings {
930add72477SBen Hutchings 	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
931add72477SBen Hutchings 	unsigned int index, size;
932add72477SBen Hutchings 	u32 flow_id;
933add72477SBen Hutchings 
934add72477SBen Hutchings 	if (!spin_trylock_bh(&efx->filter_lock))
935add72477SBen Hutchings 		return false;
936add72477SBen Hutchings 
937add72477SBen Hutchings 	expire_one = efx->type->filter_rfs_expire_one;
938add72477SBen Hutchings 	index = efx->rps_expire_index;
939add72477SBen Hutchings 	size = efx->type->max_rx_ip_filters;
940add72477SBen Hutchings 	while (quota--) {
941add72477SBen Hutchings 		flow_id = efx->rps_flow_id[index];
942add72477SBen Hutchings 		if (expire_one(efx, flow_id, index))
943add72477SBen Hutchings 			netif_info(efx, rx_status, efx->net_dev,
944add72477SBen Hutchings 				   "expired filter %d [flow %u]\n",
945add72477SBen Hutchings 				   index, flow_id);
946add72477SBen Hutchings 		if (++index == size)
947add72477SBen Hutchings 			index = 0;
948add72477SBen Hutchings 	}
949add72477SBen Hutchings 	efx->rps_expire_index = index;
950add72477SBen Hutchings 
951add72477SBen Hutchings 	spin_unlock_bh(&efx->filter_lock);
952add72477SBen Hutchings 	return true;
953add72477SBen Hutchings }
954add72477SBen Hutchings 
955add72477SBen Hutchings #endif /* CONFIG_RFS_ACCEL */
956b883d0bdSBen Hutchings 
957b883d0bdSBen Hutchings /**
958b883d0bdSBen Hutchings  * efx_filter_is_mc_recipient - test whether spec is a multicast recipient
959b883d0bdSBen Hutchings  * @spec: Specification to test
960b883d0bdSBen Hutchings  *
961b883d0bdSBen Hutchings  * Return: %true if the specification is a non-drop RX filter that
962b883d0bdSBen Hutchings  * matches a local MAC address I/G bit value of 1 or matches a local
963b883d0bdSBen Hutchings  * IPv4 or IPv6 address value in the respective multicast address
964b883d0bdSBen Hutchings  * range.  Otherwise %false.
965b883d0bdSBen Hutchings  */
966b883d0bdSBen Hutchings bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec)
967b883d0bdSBen Hutchings {
968b883d0bdSBen Hutchings 	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
969b883d0bdSBen Hutchings 	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
970b883d0bdSBen Hutchings 		return false;
971b883d0bdSBen Hutchings 
972b883d0bdSBen Hutchings 	if (spec->match_flags &
973b883d0bdSBen Hutchings 	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
974b883d0bdSBen Hutchings 	    is_multicast_ether_addr(spec->loc_mac))
975b883d0bdSBen Hutchings 		return true;
976b883d0bdSBen Hutchings 
977b883d0bdSBen Hutchings 	if ((spec->match_flags &
978b883d0bdSBen Hutchings 	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
979b883d0bdSBen Hutchings 	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
980b883d0bdSBen Hutchings 		if (spec->ether_type == htons(ETH_P_IP) &&
981b883d0bdSBen Hutchings 		    ipv4_is_multicast(spec->loc_host[0]))
982b883d0bdSBen Hutchings 			return true;
983b883d0bdSBen Hutchings 		if (spec->ether_type == htons(ETH_P_IPV6) &&
984b883d0bdSBen Hutchings 		    ((const u8 *)spec->loc_host)[0] == 0xff)
985b883d0bdSBen Hutchings 			return true;
986b883d0bdSBen Hutchings 	}
987b883d0bdSBen Hutchings 
988b883d0bdSBen Hutchings 	return false;
989b883d0bdSBen Hutchings }
990