// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include <linux/iommu.h>
#include <net/rps.h>
#include "efx.h"
#include "nic.h"
#include "rx_common.h"

/* This is the percentage fill level below which new RX descriptors
 * will be added to the RX descriptor ring.
 */
static unsigned int rx_refill_threshold;
module_param(rx_refill_threshold, uint, 0444);
MODULE_PARM_DESC(rx_refill_threshold,
		 "RX descriptor ring refill threshold (%)");

/* RX maximum head room required.
 *
 * This must be at least 1 to prevent overflow, plus one packet-worth
 * to allow pipelined receives.
 */
#define EFX_RXD_HEAD_ROOM (1 + EFX_RX_MAX_FRAGS)
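/* (EFX_RX_MAX_FRAGS is the maximum number of RX buffers that a single
 * scattered packet can occupy, i.e. the "packet-worth" referred to above.)
 */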

static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf);

/* Check the RX page recycle ring for a page that can be reused. */
static struct page *efx_reuse_page(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	unsigned int index;
	struct page *page;

	if (unlikely(!rx_queue->page_ring))
		return NULL;
	index = rx_queue->page_remove & rx_queue->page_ptr_mask;
	page = rx_queue->page_ring[index];
	if (page == NULL)
		return NULL;

	rx_queue->page_ring[index] = NULL;
	/* page_remove cannot exceed page_add. */
	if (rx_queue->page_remove != rx_queue->page_add)
		++rx_queue->page_remove;

	/* If page_count is 1 then we hold the only reference to this page. */
	if (page_count(page) == 1) {
		++rx_queue->page_recycle_count;
		return page;
	} else {
		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
		++rx_queue->page_recycle_failed;
	}

	return NULL;
}

/* Attempt to recycle the page if there is an RX recycle ring; the page can
 * only be added if this is the final RX buffer, to prevent pages being used in
 * the descriptor ring and appearing in the recycle ring simultaneously.
 */
static void efx_recycle_rx_page(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	struct efx_nic *efx = rx_queue->efx;
	struct page *page = rx_buf->page;
	unsigned int index;

	/* Only recycle the page after processing the final buffer. */
	if (!(rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE))
		return;

	index = rx_queue->page_add & rx_queue->page_ptr_mask;
	if (rx_queue->page_ring[index] == NULL) {
		unsigned int read_index = rx_queue->page_remove &
			rx_queue->page_ptr_mask;

		/* The next slot in the recycle ring is available, but
		 * increment page_remove if the read pointer currently
		 * points here.
		 */
		if (read_index == index)
			++rx_queue->page_remove;
		rx_queue->page_ring[index] = page;
		++rx_queue->page_add;
		return;
	}
	++rx_queue->page_recycle_full;
	efx_unmap_rx_buffer(efx, rx_buf);
	put_page(rx_buf->page);
}

/* Recycle the pages that are used by buffers that have just been received. */
void efx_siena_recycle_rx_pages(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf,
				unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(!rx_queue->page_ring))
		return;

	do {
		efx_recycle_rx_page(channel, rx_buf);
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--n_frags);
}

void efx_siena_discard_rx_packet(struct efx_channel *channel,
				 struct efx_rx_buffer *rx_buf,
				 unsigned int n_frags)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);

	efx_siena_recycle_rx_pages(channel, rx_buf, n_frags);

	efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
}

static void efx_init_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	unsigned int bufs_in_recycle_ring, page_ring_size;
	struct efx_nic *efx = rx_queue->efx;

	bufs_in_recycle_ring = efx_rx_recycle_ring_size(efx);
	page_ring_size = roundup_pow_of_two(bufs_in_recycle_ring /
					    efx->rx_bufs_per_page);
	rx_queue->page_ring = kcalloc(page_ring_size,
				      sizeof(*rx_queue->page_ring), GFP_KERNEL);
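	/* Failure to allocate the recycle ring is not fatal: with page_ring
	 * left NULL and page_ptr_mask zero, the RX path simply allocates a
	 * fresh page on every refill instead of recycling.
	 */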
	if (!rx_queue->page_ring)
		rx_queue->page_ptr_mask = 0;
	else
		rx_queue->page_ptr_mask = page_ring_size - 1;
}

static void efx_fini_rx_recycle_ring(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	int i;

	if (unlikely(!rx_queue->page_ring))
		return;

	/* Unmap and release the pages in the recycle ring. Remove the ring. */
	for (i = 0; i <= rx_queue->page_ptr_mask; i++) {
		struct page *page = rx_queue->page_ring[i];
		struct efx_rx_page_state *state;

		if (page == NULL)
			continue;

		state = page_address(page);
		dma_unmap_page(&efx->pci_dev->dev, state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
		put_page(page);
	}
	kfree(rx_queue->page_ring);
	rx_queue->page_ring = NULL;
}

static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf)
{
	/* Release the page reference we hold for the buffer. */
	if (rx_buf->page)
		put_page(rx_buf->page);

	/* If this is the last buffer in a page, unmap and free it. */
	if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
		efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
		efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
	}
	rx_buf->page = NULL;
}

int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int entries;
	int rc;

	/* Create the smallest power-of-two aligned ring */
	entries = max(roundup_pow_of_two(efx->rxq_entries), EFX_MIN_DMAQ_SIZE);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
	rx_queue->ptr_mask = entries - 1;

	netif_dbg(efx, probe, efx->net_dev,
		  "creating RX queue %d size %#x mask %#x\n",
		  efx_rx_queue_index(rx_queue), efx->rxq_entries,
		  rx_queue->ptr_mask);

	/* Allocate RX buffers */
	rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
				   GFP_KERNEL);
	if (!rx_queue->buffer)
		return -ENOMEM;

	rc = efx_nic_probe_rx(rx_queue);
	if (rc) {
		kfree(rx_queue->buffer);
		rx_queue->buffer = NULL;
	}

	return rc;
}

void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue)
{
	unsigned int max_fill, trigger, max_trigger;
	struct efx_nic *efx = rx_queue->efx;
	int rc = 0;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "initialising RX queue %d\n", efx_rx_queue_index(rx_queue));

	/* Initialise ptr fields */
	rx_queue->added_count = 0;
	rx_queue->notified_count = 0;
	rx_queue->removed_count = 0;
	rx_queue->min_fill = -1U;
	efx_init_rx_recycle_ring(rx_queue);

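	/* page_add starts one full ring-length ahead of page_remove.  All the
	 * slots are still NULL at this point and efx_reuse_page() skips empty
	 * slots, so recycling only takes effect once pages are pushed back
	 * into the ring by efx_recycle_rx_page().
	 */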
	rx_queue->page_remove = 0;
	rx_queue->page_add = rx_queue->page_ptr_mask + 1;
	rx_queue->page_recycle_count = 0;
	rx_queue->page_recycle_failed = 0;
	rx_queue->page_recycle_full = 0;

	/* Initialise limit fields */
	max_fill = efx->rxq_entries - EFX_RXD_HEAD_ROOM;
	max_trigger =
		max_fill - efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	if (rx_refill_threshold != 0) {
		trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
		if (trigger > max_trigger)
			trigger = max_trigger;
	} else {
		trigger = max_trigger;
	}
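	/* Refilling starts once the fill level drops below the trigger; with
	 * the default rx_refill_threshold of 0 that is as soon as more than
	 * one batch-worth of descriptors is empty.
	 */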

	rx_queue->max_fill = max_fill;
	rx_queue->fast_fill_trigger = trigger;
	rx_queue->refill_enabled = true;

	/* Initialise XDP queue information */
	rc = xdp_rxq_info_reg(&rx_queue->xdp_rxq_info, efx->net_dev,
			      rx_queue->core_index, 0);

	if (rc) {
		netif_err(efx, rx_err, efx->net_dev,
			  "Failure to initialise XDP queue information rc=%d\n",
			  rc);
		efx->xdp_rxq_info_failed = true;
	} else {
		rx_queue->xdp_rxq_info_valid = true;
	}

	/* Set up RX descriptor ring */
	efx_nic_init_rx(rx_queue);
}

void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	struct efx_rx_buffer *rx_buf;
	int i;

	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "shutting down RX queue %d\n", efx_rx_queue_index(rx_queue));

	del_timer_sync(&rx_queue->slow_fill);

	/* Release RX buffers from the current read ptr to the write ptr */
	if (rx_queue->buffer) {
		for (i = rx_queue->removed_count; i < rx_queue->added_count;
		     i++) {
			unsigned int index = i & rx_queue->ptr_mask;

			rx_buf = efx_rx_buffer(rx_queue, index);
			efx_fini_rx_buffer(rx_queue, rx_buf);
		}
	}

	efx_fini_rx_recycle_ring(rx_queue);

	if (rx_queue->xdp_rxq_info_valid)
		xdp_rxq_info_unreg(&rx_queue->xdp_rxq_info);

	rx_queue->xdp_rxq_info_valid = false;
}

void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue)
{
	netif_dbg(rx_queue->efx, drv, rx_queue->efx->net_dev,
		  "destroying RX queue %d\n", efx_rx_queue_index(rx_queue));

	efx_nic_remove_rx(rx_queue);

	kfree(rx_queue->buffer);
	rx_queue->buffer = NULL;
}

/* Unmap a DMA-mapped page.  This function is only called for the final RX
 * buffer in a page.
 */
static void efx_unmap_rx_buffer(struct efx_nic *efx,
				struct efx_rx_buffer *rx_buf)
{
	struct page *page = rx_buf->page;

	if (page) {
		struct efx_rx_page_state *state = page_address(page);

		dma_unmap_page(&efx->pci_dev->dev,
			       state->dma_addr,
			       PAGE_SIZE << efx->rx_buffer_order,
			       DMA_FROM_DEVICE);
	}
}

void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf,
			       unsigned int num_bufs)
{
	do {
		if (rx_buf->page) {
			put_page(rx_buf->page);
			rx_buf->page = NULL;
		}
		rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
	} while (--num_bufs);
}

void efx_siena_rx_slow_fill(struct timer_list *t)
{
	struct efx_rx_queue *rx_queue = from_timer(rx_queue, t, slow_fill);

	/* Post an event to cause NAPI to run and refill the queue */
	efx_nic_generate_fill_event(rx_queue);
	++rx_queue->slow_fill_count;
}

static void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(10));
}

/* efx_init_rx_buffers - create a batch of page-based RX buffers
 *
 * @rx_queue:		Efx RX queue
 *
 * This allocates a batch of pages, maps them for DMA, and populates
 * struct efx_rx_buffers for each one. Return a negative error code or
 * 0 on success. If a single page can be used for multiple buffers,
 * then the page will either be inserted fully, or not at all.
 */
static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue, bool atomic)
{
	unsigned int page_offset, index, count;
	struct efx_nic *efx = rx_queue->efx;
	struct efx_rx_page_state *state;
	struct efx_rx_buffer *rx_buf;
	dma_addr_t dma_addr;
	struct page *page;

	count = 0;
	do {
		page = efx_reuse_page(rx_queue);
		if (page == NULL) {
			page = alloc_pages(__GFP_COMP |
					   (atomic ? GFP_ATOMIC : GFP_KERNEL),
					   efx->rx_buffer_order);
			if (unlikely(page == NULL))
				return -ENOMEM;
			dma_addr =
				dma_map_page(&efx->pci_dev->dev, page, 0,
					     PAGE_SIZE << efx->rx_buffer_order,
					     DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(&efx->pci_dev->dev,
						       dma_addr))) {
				__free_pages(page, efx->rx_buffer_order);
				return -EIO;
			}
			state = page_address(page);
			state->dma_addr = dma_addr;
		} else {
			state = page_address(page);
			dma_addr = state->dma_addr;
		}

		dma_addr += sizeof(struct efx_rx_page_state);
		page_offset = sizeof(struct efx_rx_page_state);

		do {
			index = rx_queue->added_count & rx_queue->ptr_mask;
			rx_buf = efx_rx_buffer(rx_queue, index);
			rx_buf->dma_addr = dma_addr + efx->rx_ip_align +
					   EFX_XDP_HEADROOM;
			rx_buf->page = page;
			rx_buf->page_offset = page_offset + efx->rx_ip_align +
					      EFX_XDP_HEADROOM;
			rx_buf->len = efx->rx_dma_len;
			rx_buf->flags = 0;
			++rx_queue->added_count;
			get_page(page);
			dma_addr += efx->rx_page_buf_step;
			page_offset += efx->rx_page_buf_step;
		} while (page_offset + efx->rx_page_buf_step <= PAGE_SIZE);

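		/* Only the final buffer carved from the page carries
		 * EFX_RX_BUF_LAST_IN_PAGE, so the page is unmapped or
		 * recycled exactly once, when that buffer is completed.
		 */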
		rx_buf->flags = EFX_RX_BUF_LAST_IN_PAGE;
	} while (++count < efx->rx_pages_per_batch);

	return 0;
}

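/* Work out how each receive page is split into buffers: a DMA-mapped page
 * starts with a struct efx_rx_page_state (which records the DMA address for
 * later unmapping), followed by rx_bufs_per_page buffers of rx_page_buf_step
 * bytes, each reserving IP-alignment plus XDP head room before the packet
 * data and XDP tail room after it.  Compound pages (rx_buffer_order > 0)
 * hold a single buffer.
 */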
void efx_siena_rx_config_page_split(struct efx_nic *efx)
{
	efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align +
				      EFX_XDP_HEADROOM + EFX_XDP_TAILROOM,
				      EFX_RX_BUF_ALIGNMENT);
	efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
		((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
		efx->rx_page_buf_step);
	efx->rx_buffer_truesize = (PAGE_SIZE << efx->rx_buffer_order) /
		efx->rx_bufs_per_page;
	efx->rx_pages_per_batch = DIV_ROUND_UP(EFX_RX_PREFERRED_BATCH,
					       efx->rx_bufs_per_page);
}

/* efx_siena_fast_push_rx_descriptors - push new RX descriptors quickly
 * @rx_queue:		RX descriptor queue
 *
 * This will aim to fill the RX descriptor queue up to
 * @rx_queue->max_fill. If there is insufficient atomic
 * memory to do so, a slow fill will be scheduled.
 *
 * The caller must provide serialisation (none is used here). In practice,
 * this means this function must run from the NAPI handler, or be called
 * when NAPI is disabled.
 */
void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
					bool atomic)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned int fill_level, batch_size;
	int space, rc = 0;

	if (!rx_queue->refill_enabled)
		return;

	/* Calculate current fill level, and exit if we don't need to fill */
	fill_level = (rx_queue->added_count - rx_queue->removed_count);
	EFX_WARN_ON_ONCE_PARANOID(fill_level > rx_queue->efx->rxq_entries);
	if (fill_level >= rx_queue->fast_fill_trigger)
		goto out;

	/* Record minimum fill level */
	if (unlikely(fill_level < rx_queue->min_fill)) {
		if (fill_level)
			rx_queue->min_fill = fill_level;
	}

	batch_size = efx->rx_pages_per_batch * efx->rx_bufs_per_page;
	space = rx_queue->max_fill - fill_level;
	EFX_WARN_ON_ONCE_PARANOID(space < batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filling descriptor ring from"
		   " level %d to level %d\n",
		   efx_rx_queue_index(rx_queue), fill_level,
		   rx_queue->max_fill);

	do {
		rc = efx_init_rx_buffers(rx_queue, atomic);
		if (unlikely(rc)) {
			/* Ensure that we don't leave the rx queue empty */
			efx_schedule_slow_fill(rx_queue);
			goto out;
		}
	} while ((space -= batch_size) >= batch_size);

	netif_vdbg(rx_queue->efx, rx_status, rx_queue->efx->net_dev,
		   "RX queue %d fast-filled descriptor ring "
		   "to level %d\n", efx_rx_queue_index(rx_queue),
		   rx_queue->added_count - rx_queue->removed_count);

 out:
	if (rx_queue->notified_count != rx_queue->added_count)
		efx_nic_notify_rx_desc(rx_queue);
}

/* Pass a received packet up through GRO.  GRO can handle pages
 * regardless of checksum state and skbs with a good checksum.
 */
void
efx_siena_rx_packet_gro(struct efx_channel *channel,
			struct efx_rx_buffer *rx_buf,
			unsigned int n_frags, u8 *eh, __wsum csum)
{
	struct napi_struct *napi = &channel->napi_str;
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	skb = napi_get_frags(napi);
	if (unlikely(!skb)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}

	if (efx->net_dev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, efx_rx_buf_hash(efx, eh),
			     PKT_HASH_TYPE_L3);
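	/* A non-zero csum here is a full packet checksum supplied by the
	 * caller (typically computed by the NIC), so report it as
	 * CHECKSUM_COMPLETE; otherwise fall back to the per-packet
	 * "checksum is good" flag.
	 */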
	if (csum) {
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
	} else {
		skb->ip_summed = ((rx_buf->flags & EFX_RX_PKT_CSUMMED) ?
				  CHECKSUM_UNNECESSARY : CHECKSUM_NONE);
	}
	skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);

	for (;;) {
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   rx_buf->page, rx_buf->page_offset,
				   rx_buf->len);
		rx_buf->page = NULL;
		skb->len += rx_buf->len;
		if (skb_shinfo(skb)->nr_frags == n_frags)
			break;

		rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
	}

	skb->data_len = skb->len;
	skb->truesize += n_frags * efx->rx_buffer_truesize;

	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	napi_gro_frags(napi);
}

void efx_siena_set_default_rx_indir_table(struct efx_nic *efx,
					  struct efx_rss_context *ctx)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++)
		ctx->rx_indir_table[i] =
			ethtool_rxfh_indir_default(i, efx->rss_spread);
}

/**
 * efx_siena_filter_is_mc_recipient - test whether spec is a multicast recipient
 * @spec: Specification to test
 *
 * Return: %true if the specification is a non-drop RX filter that
 * matches a local MAC address I/G bit value of 1 or matches a local
 * IPv4 or IPv6 address value in the respective multicast address
 * range.  Otherwise %false.
 */
bool efx_siena_filter_is_mc_recipient(const struct efx_filter_spec *spec)
{
	if (!(spec->flags & EFX_FILTER_FLAG_RX) ||
	    spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP)
		return false;

	if (spec->match_flags &
	    (EFX_FILTER_MATCH_LOC_MAC | EFX_FILTER_MATCH_LOC_MAC_IG) &&
	    is_multicast_ether_addr(spec->loc_mac))
		return true;

	if ((spec->match_flags &
	     (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
	    (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
		if (spec->ether_type == htons(ETH_P_IP) &&
		    ipv4_is_multicast(spec->loc_host[0]))
			return true;
		if (spec->ether_type == htons(ETH_P_IPV6) &&
		    ((const u8 *)spec->loc_host)[0] == 0xff)
			return true;
	}

	return false;
}

bool efx_siena_filter_spec_equal(const struct efx_filter_spec *left,
				 const struct efx_filter_spec *right)
{
	if ((left->match_flags ^ right->match_flags) |
	    ((left->flags ^ right->flags) &
	     (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
		return false;

	return memcmp(&left->outer_vid, &right->outer_vid,
		      sizeof(struct efx_filter_spec) -
		      offsetof(struct efx_filter_spec, outer_vid)) == 0;
}

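/* The hash covers the spec from outer_vid to the end of the structure;
 * match_flags and the RX/TX flags are compared separately by
 * efx_siena_filter_spec_equal().  The BUILD_BUG_ON below ensures outer_vid
 * sits on a 32-bit boundary, since jhash2() walks the region in u32 words.
 */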
u32 efx_siena_filter_spec_hash(const struct efx_filter_spec *spec)
{
	BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
	return jhash2((const u32 *)&spec->outer_vid,
		      (sizeof(struct efx_filter_spec) -
		       offsetof(struct efx_filter_spec, outer_vid)) / 4,
		      0);
}

#ifdef CONFIG_RFS_ACCEL
bool efx_siena_rps_check_rule(struct efx_arfs_rule *rule,
			      unsigned int filter_idx, bool *force)
{
	if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) {
		/* ARFS is currently updating this entry, leave it */
		return false;
	}
	if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) {
		/* ARFS tried and failed to update this, so it's probably out
		 * of date.  Remove the filter and the ARFS rule entry.
		 */
		rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
		*force = true;
		return true;
	} else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */
		/* ARFS has moved on, so old filter is not needed.  Since we did
		 * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will
		 * not be removed by efx_siena_rps_hash_del() subsequently.
		 */
		*force = true;
		return true;
	}
	/* Remove it iff ARFS wants to. */
	return true;
}

static
struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
				       const struct efx_filter_spec *spec)
{
	u32 hash = efx_siena_filter_spec_hash(spec);

	lockdep_assert_held(&efx->rps_hash_lock);
	if (!efx->rps_hash_table)
		return NULL;
	return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE];
}

struct efx_arfs_rule *efx_siena_rps_hash_find(struct efx_nic *efx,
					const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_siena_filter_spec_equal(spec, &rule->spec))
			return rule;
	}
	return NULL;
}

static struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx,
					const struct efx_filter_spec *spec,
					bool *new)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (!head)
		return NULL;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_siena_filter_spec_equal(spec, &rule->spec)) {
			*new = false;
			return rule;
		}
	}
	rule = kmalloc(sizeof(*rule), GFP_ATOMIC);
	*new = true;
	if (rule) {
		memcpy(&rule->spec, spec, sizeof(rule->spec));
		hlist_add_head(&rule->node, head);
	}
	return rule;
}

void efx_siena_rps_hash_del(struct efx_nic *efx,
			    const struct efx_filter_spec *spec)
{
	struct efx_arfs_rule *rule;
	struct hlist_head *head;
	struct hlist_node *node;

	head = efx_rps_hash_bucket(efx, spec);
	if (WARN_ON(!head))
		return;
	hlist_for_each(node, head) {
		rule = container_of(node, struct efx_arfs_rule, node);
		if (efx_siena_filter_spec_equal(spec, &rule->spec)) {
			/* Someone already reused the entry.  We know that if
			 * this check doesn't fire (i.e. filter_id == REMOVING)
			 * then the REMOVING mark was put there by our caller,
			 * because caller is holding a lock on filter table and
			 * only holders of that lock set REMOVING.
			 */
			if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING)
				return;
			hlist_del(node);
			kfree(rule);
			return;
		}
	}
	/* We didn't find it. */
	WARN_ON(1);
}
#endif

int efx_siena_probe_filters(struct efx_nic *efx)
{
	int rc;

	mutex_lock(&efx->mac_lock);
	down_write(&efx->filter_sem);
	rc = efx->type->filter_table_probe(efx);
	if (rc)
		goto out_unlock;

#ifdef CONFIG_RFS_ACCEL
	if (efx->type->offload_features & NETIF_F_NTUPLE) {
		struct efx_channel *channel;
		int i, success = 1;

		efx_for_each_channel(channel, efx) {
			channel->rps_flow_id =
				kcalloc(efx->type->max_rx_ip_filters,
					sizeof(*channel->rps_flow_id),
					GFP_KERNEL);
			if (!channel->rps_flow_id)
				success = 0;
			else
				for (i = 0;
				     i < efx->type->max_rx_ip_filters;
				     ++i)
					channel->rps_flow_id[i] =
						RPS_FLOW_ID_INVALID;
			channel->rfs_expire_index = 0;
			channel->rfs_filter_count = 0;
		}

		if (!success) {
			efx_for_each_channel(channel, efx)
				kfree(channel->rps_flow_id);
			efx->type->filter_table_remove(efx);
			rc = -ENOMEM;
			goto out_unlock;
		}
	}
#endif
out_unlock:
	up_write(&efx->filter_sem);
	mutex_unlock(&efx->mac_lock);
	return rc;
}

void efx_siena_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		cancel_delayed_work_sync(&channel->filter_work);
		kfree(channel->rps_flow_id);
		channel->rps_flow_id = NULL;
	}
#endif
	down_write(&efx->filter_sem);
	efx->type->filter_table_remove(efx);
	up_write(&efx->filter_sem);
}

#ifdef CONFIG_RFS_ACCEL

static void efx_filter_rfs_work(struct work_struct *data)
{
	struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
							      work);
	struct efx_nic *efx = netdev_priv(req->net_dev);
	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
	int slot_idx = req - efx->rps_slot;
	struct efx_arfs_rule *rule;
	u16 arfs_id = 0;
	int rc;

	rc = efx->type->filter_insert(efx, &req->spec, true);
	if (rc >= 0)
		/* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
		rc %= efx->type->max_rx_ip_filters;
	if (efx->rps_hash_table) {
		spin_lock_bh(&efx->rps_hash_lock);
		rule = efx_siena_rps_hash_find(efx, &req->spec);
		/* The rule might have already gone, if someone else's request
		 * for the same spec was already worked and then expired before
		 * we got around to our work.  In that case we have nothing
		 * tying us to an arfs_id, meaning that as soon as the filter
		 * is considered for expiry it will be removed.
		 */
		if (rule) {
			if (rc < 0)
				rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
			else
				rule->filter_id = rc;
			arfs_id = rule->arfs_id;
		}
		spin_unlock_bh(&efx->rps_hash_lock);
	}
	if (rc >= 0) {
		/* Remember this so we can check whether to expire the filter
		 * later.
		 */
		mutex_lock(&efx->rps_mutex);
		if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
			channel->rfs_filter_count++;
		channel->rps_flow_id[rc] = req->flow_id;
		mutex_unlock(&efx->rps_mutex);

		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_succeeded++;
	} else {
		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_failed++;
		/* We're overloading the NIC's filter tables, so let's do a
		 * chunk of extra expiry work.
		 */
		__efx_siena_filter_rfs_expire(channel,
					      min(channel->rfs_filter_count,
						  100u));
	}

	/* Release references */
	clear_bit(slot_idx, &efx->rps_slot_map);
	dev_put(req->net_dev);
}

int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
			 u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_async_filter_insertion *req;
	struct efx_arfs_rule *rule;
	struct flow_keys fk;
	int slot_idx;
	bool new;
	int rc;

	/* find a free slot */
	for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
		if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
			break;
	if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
		return -EBUSY;

	if (flow_id == RPS_FLOW_ID_INVALID) {
		rc = -EINVAL;
		goto out_clear;
	}

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	req = efx->rps_slot + slot_idx;
	efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	req->spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	req->spec.ether_type = fk.basic.n_proto;
	req->spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		req->spec.rem_host[0] = fk.addrs.v4addrs.src;
		req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}

	req->spec.rem_port = fk.ports.src;
	req->spec.loc_port = fk.ports.dst;

	if (efx->rps_hash_table) {
		/* Add it to ARFS hash table */
		spin_lock(&efx->rps_hash_lock);
		rule = efx_rps_hash_add(efx, &req->spec, &new);
		if (!rule) {
			rc = -ENOMEM;
			goto out_unlock;
		}
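		/* A new rule gets a fresh arfs_id; taking the counter modulo
		 * RPS_NO_FILTER keeps the id from ever matching the "no
		 * filter installed" sentinel used by the RPS core.
		 */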
		if (new)
			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
		rc = rule->arfs_id;
		/* Skip if existing or pending filter already does the right thing */
		if (!new && rule->rxq_index == rxq_index &&
		    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
			goto out_unlock;
		rule->rxq_index = rxq_index;
		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
		spin_unlock(&efx->rps_hash_lock);
	} else {
		/* Without an ARFS hash table, we just use arfs_id 0 for all
		 * filters.  This means if multiple flows hash to the same
		 * flow_id, all but the most recently touched will be eligible
		 * for expiry.
		 */
		rc = 0;
	}

	/* Queue the request */
	dev_hold(req->net_dev = net_dev);
	INIT_WORK(&req->work, efx_filter_rfs_work);
	req->rxq_index = rxq_index;
	req->flow_id = flow_id;
	schedule_work(&req->work);
	return rc;
out_unlock:
	spin_unlock(&efx->rps_hash_lock);
out_clear:
	clear_bit(slot_idx, &efx->rps_slot_map);
	return rc;
}

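/* Scan up to @quota installed filters on this channel, expiring those the
 * driver decides are stale (typically by asking rps_may_expire_flow()).
 * Returns false, having done nothing, if another expiry pass already holds
 * rps_mutex; the caller can simply try again later.
 */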
bool __efx_siena_filter_rfs_expire(struct efx_channel *channel,
				   unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	struct efx_nic *efx = channel->efx;
	unsigned int index, size, start;
	u32 flow_id;

	if (!mutex_trylock(&efx->rps_mutex))
		return false;
	expire_one = efx->type->filter_rfs_expire_one;
	index = channel->rfs_expire_index;
	start = index;
	size = efx->type->max_rx_ip_filters;
	while (quota) {
		flow_id = channel->rps_flow_id[index];

		if (flow_id != RPS_FLOW_ID_INVALID) {
			quota--;
			if (expire_one(efx, flow_id, index)) {
				netif_info(efx, rx_status, efx->net_dev,
					   "expired filter %d [channel %u flow %u]\n",
					   index, channel->channel, flow_id);
				channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
				channel->rfs_filter_count--;
			}
		}
		if (++index == size)
			index = 0;
		/* If we were called with a quota that exceeds the total number
		 * of filters in the table (which shouldn't happen, but could
		 * if two callers race), ensure that we don't loop forever -
		 * stop when we've examined every row of the table.
		 */
		if (index == start)
			break;
	}

	channel->rfs_expire_index = index;
	mutex_unlock(&efx->rps_mutex);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */